From 3ff2b6d8bb2b6540e99c7e635dcccc9fe67e69ed Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Feb 2025 14:32:06 -0500 Subject: [PATCH 1/6] fix brackets for subs --- .../configuration-examples-fleet.md | 6 +-- .../cloud-on-k8s/configuration-fleet.md | 2 +- .../deploy/cloud-on-k8s/k8s_prerequisites.md | 2 +- .../install-elasticsearch-with-docker.md | 2 +- ...stall-elasticsearch-with-zip-on-windows.md | 2 +- .../self-managed/install-with-docker.md | 2 +- .../monitor/monitoring-data/kibana-alerts.md | 4 +- .../monitor-troubleshooting.md | 2 +- .../visualizing-monitoring-data.md | 2 +- .../collecting-log-data-with-filebeat.md | 6 +-- ...lecting-monitoring-data-with-metricbeat.md | 4 +- .../stack-monitoring/es-http-exporter.md | 4 +- .../es-legacy-collection-methods.md | 6 +-- .../stack-monitoring/es-local-exporter.md | 2 +- .../es-monitoring-collectors.md | 8 ++-- .../es-monitoring-exporters.md | 4 +- .../kibana-monitoring-data.md | 2 +- .../kibana-monitoring-metricbeat.md | 10 ++--- ...g-cipher-suites-for-stronger-encryption.md | 2 +- .../security/httprest-clients-security.md | 2 +- .../security/secure-clients-integrations.md | 2 +- .../security/set-up-minimal-security.md | 4 +- .../snapshot-and-restore/create-snapshots.md | 2 +- .../authentication-realms.md | 2 +- .../authorization-plugins.md | 2 +- .../controlling-user-cache.md | 2 +- .../cluster-or-deployment-auth/custom.md | 4 +- ...ing-privileges-for-data-streams-aliases.md | 2 +- .../realm-chains.md | 2 +- .../alerts/alerting-getting-started.md | 2 +- .../alerts-cases/alerts/alerting-setup.md | 2 +- .../alerts-cases/alerts/view-alerts.md | 2 +- .../alerts-cases/watcher/actions-webhook.md | 4 +- .../alerts-cases/watcher/managing-watches.md | 2 +- explore-analyze/machine-learning.md | 4 +- .../anomaly-detection/anomaly-how-tos.md | 4 +- .../anomaly-detection/ml-api-quickref.md | 4 +- .../anomaly-detection/ml-configuring-url.md | 2 +- .../anomaly-detection/move-jobs.md | 2 +- .../ml-dfa-classification.md | 4 +- .../data-frame-analytics/ml-dfa-concepts.md | 2 +- .../ml-dfa-custom-urls.md | 2 +- .../data-frame-analytics/ml-dfa-regression.md | 4 +- .../ml-feature-importance.md | 8 ++-- .../machine-learning/nlp/ml-nlp-e5.md | 2 +- .../machine-learning/nlp/ml-nlp-elser.md | 2 +- .../machine-learning/nlp/ml-nlp-overview.md | 4 +- .../setting-up-machine-learning.md | 2 +- explore-analyze/query-filter/tools/console.md | 2 +- .../reporting-troubleshooting-pdf.md | 2 +- .../transforms/ecommerce-transforms.md | 2 +- .../transforms/transform-alerts.md | 2 +- explore-analyze/transforms/transform-usage.md | 2 +- .../visualize/legacy-editors/tsvb.md | 2 +- .../agent-kafka-essink.md | 2 +- .../agent-kafka-ls.md | 2 +- .../ls-enrich.md | 4 +- .../ls-multi.md | 2 +- .../configure-lifecycle-policy.md | 4 +- .../index-management-in-kibana.md | 2 +- .../tutorial-automate-rollover.md | 2 +- ...y-configure-endpoint-integration-policy.md | 8 ++-- .../serverless/security-machine-learning.md | 4 +- .../active-directory-realm.md | 2 +- .../bootstrap-checks-xpack.md | 6 +-- .../elasticsearch-reference/built-in-roles.md | 2 +- .../elasticsearch-reference/defining-roles.md | 2 +- .../elasticsearch-reference/file-realm.md | 4 +- .../index-lifecycle-management.md | 2 +- .../elasticsearch-reference/index-mgmt.md | 2 +- .../elasticsearch-reference/ip-filtering.md | 8 ++-- .../elasticsearch-reference/kerberos-realm.md | 2 +- .../elasticsearch-reference/ldap-realm.md | 4 +- .../elasticsearch-reference/mapping-roles.md | 4 +- .../monitor-elasticsearch-cluster.md | 2 
+- .../monitoring-production.md | 2 +- .../elasticsearch-reference/native-realm.md | 2 +- .../elasticsearch-reference/oidc-realm.md | 2 +- .../role-mapping-resources.md | 2 +- .../saml-guide-stack.md | 2 +- .../elasticsearch-reference/saml-realm.md | 4 +- .../secure-monitoring.md | 2 +- .../elasticsearch-reference/security-files.md | 2 +- .../elasticsearch-reference/xpack-rollup.md | 2 +- .../fleet-agent-serverless-restrictions.md | 4 +- .../Security-production-considerations.md | 4 +- .../kibana/kibana/elasticsearch-mutual-tls.md | 2 +- .../kibana/kibana/xpack-security.md | 2 +- .../apm-open-telemetry-direct.md | 2 +- .../configure-endpoint-integration-policy.md | 8 ++-- .../security/data-views-in-sec.md | 2 +- .../security/machine-learning.md | 4 +- .../elastic-stack/air-gapped-install.md | 16 +++---- .../elastic-stack/upgrading-elastic-stack.md | 2 +- serverless/pages/action-connectors.asciidoc | 2 +- serverless/pages/api-keys.asciidoc | 4 +- .../apis-elasticsearch-conventions.asciidoc | 24 +++++----- serverless/pages/apis-http-apis.asciidoc | 2 +- .../clients-dot-net-getting-started.asciidoc | 4 +- .../pages/clients-go-getting-started.asciidoc | 6 +-- .../clients-java-getting-started.asciidoc | 8 ++-- .../clients-nodejs-getting-started.asciidoc | 2 +- .../clients-php-getting-started.asciidoc | 4 +- .../clients-python-getting-started.asciidoc | 4 +- .../clients-ruby-getting-started.asciidoc | 6 +-- serverless/pages/clients.asciidoc | 2 +- serverless/pages/cloud-regions.asciidoc | 2 +- .../pages/connecting-to-es-endpoint.asciidoc | 8 ++-- serverless/pages/custom-roles.asciidoc | 14 +++--- serverless/pages/data-views.asciidoc | 44 +++++++++---------- .../pages/debug-grok-expressions.asciidoc | 8 ++-- .../developer-tools-troubleshooting.asciidoc | 8 ++-- .../elasticsearch-developer-tools.asciidoc | 4 +- ...ore-your-data-ml-nlp-deploy-model.asciidoc | 6 +-- .../explore-your-data-ml-nlp-elser.asciidoc | 4 +- ...ore-your-data-ml-nlp-import-model.asciidoc | 6 +-- ...xplore-your-data-ml-nlp-model-ref.asciidoc | 6 +-- ...lore-your-data-ml-nlp-ner-example.asciidoc | 2 +- ...e-your-data-ml-nlp-search-compare.asciidoc | 2 +- ...e-your-data-ml-nlp-test-inference.asciidoc | 4 +- ...ata-ml-nlp-text-embedding-example.asciidoc | 2 +- .../pages/explore-your-data-ml-nlp.asciidoc | 6 +-- serverless/pages/explore-your-data.asciidoc | 4 +- serverless/pages/get-started.asciidoc | 8 ++-- serverless/pages/index-management.asciidoc | 2 +- serverless/pages/ingest-pipelines.asciidoc | 4 +- ...your-data-ingest-data-through-api.asciidoc | 4 +- ...t-data-through-integrations-beats.asciidoc | 2 +- ...ugh-integrations-connector-client.asciidoc | 12 ++--- ...ata-through-integrations-logstash.asciidoc | 16 +++---- .../ingest-your-data-upload-file.asciidoc | 6 +-- serverless/pages/ingest-your-data.asciidoc | 8 ++-- serverless/pages/machine-learning.asciidoc | 18 ++++---- serverless/pages/manage-org.asciidoc | 2 +- .../manage-your-project-rest-api.asciidoc | 2 +- serverless/pages/manage-your-project.asciidoc | 2 +- serverless/pages/ml-nlp-auto-scale.asciidoc | 4 +- serverless/pages/pricing.asciidoc | 4 +- .../profile-queries-and-aggregations.asciidoc | 4 +- .../pages/project-settings-data.asciidoc | 4 +- serverless/pages/rules.asciidoc | 10 ++--- .../run-api-requests-in-the-console.asciidoc | 2 +- serverless/pages/search-playground.asciidoc | 2 +- .../search-your-data-the-search-api.asciidoc | 6 +-- serverless/pages/search-your-data.asciidoc | 6 +-- .../pages/serverless-differences.asciidoc | 14 +++--- 
serverless/pages/sign-up.asciidoc | 4 +- serverless/pages/transforms.asciidoc | 2 +- .../pages/welcome-to-serverless.asciidoc | 6 +-- .../what-is-elasticsearch-serverless.asciidoc | 16 +++---- .../field-mappings-dense-vector.asciidoc | 2 +- .../partials/field-mappings-elser.asciidoc | 2 +- .../observability/apps/installation-layout.md | 10 ++--- .../observability/apps/monitor-apm-server.md | 2 +- .../apps/monitor-fleet-managed-apm-server.md | 4 +- .../use-metricbeat-to-send-monitoring-data.md | 4 +- .../monitor-microsoft-azure-with-beats.md | 2 +- .../tutorial-observe-nginx-instances.md | 2 +- .../logs/categorize-log-entries.md | 2 +- .../logs/inspect-log-anomalies.md | 2 +- troubleshoot/ingest/fleet/common-problems.md | 14 +++--- troubleshoot/kibana/monitoring.md | 2 +- 162 files changed, 359 insertions(+), 359 deletions(-) diff --git a/deploy-manage/deploy/cloud-on-k8s/configuration-examples-fleet.md b/deploy-manage/deploy/cloud-on-k8s/configuration-examples-fleet.md index fab24a105d..9cea9876e7 100644 --- a/deploy-manage/deploy/cloud-on-k8s/configuration-examples-fleet.md +++ b/deploy-manage/deploy/cloud-on-k8s/configuration-examples-fleet.md @@ -12,16 +12,16 @@ The examples in this section are for illustration purposes only and should not b :::: -## System and {{k8s}} {integrations} [k8s_system_and_k8s_integrations] +## System and {{k8s}} {{integrations}} [k8s_system_and_k8s_integrations] ```sh kubectl apply -f https://raw.githubusercontent.com/elastic/cloud-on-k8s/2.16/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml ``` -Deploys {{agent}} as a DaemonSet in {{fleet}} mode with System and {{k8s}} {integrations} enabled. System integration collects syslog logs, auth logs and system metrics (for CPU, I/O, filesystem, memory, network, process and others). {{k8s}} {integrations} collects API server, Container, Event, Node, Pod, Volume and system metrics. +Deploys {{agent}} as a DaemonSet in {{fleet}} mode with System and {{k8s}} {{integrations}} enabled. System integration collects syslog logs, auth logs and system metrics (for CPU, I/O, filesystem, memory, network, process and others). {{k8s}} {{integrations}} collects API server, Container, Event, Node, Pod, Volume and system metrics. -## System and {{k8s}} {integrations} running as non-root [k8s_system_and_k8s_integrations_running_as_non_root] +## System and {{k8s}} {{integrations}} running as non-root [k8s_system_and_k8s_integrations_running_as_non_root] ```sh kubectl apply -f https://raw.githubusercontent.com/elastic/cloud-on-k8s/2.16/config/recipes/elastic-agent/fleet-kubernetes-integration-nonroot.yaml diff --git a/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md b/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md index 70a8e35ab7..c953f78d2a 100644 --- a/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md +++ b/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md @@ -216,7 +216,7 @@ roleRef: ## Deploy {{agent}} in secured clusters [k8s-elastic-agent-fleet-configuration-deploying-in-secured-clusters] -To deploy {{agent}} in clusters with the Pod Security Policy admission controller enabled, or in [OpenShift](k8s-openshift-agent.md) clusters, you might need to grant additional permissions to the Service Account used by the {{agent}} Pods. Those Service Accounts must be bound to a Role or ClusterRole that has `use` permission for the required Pod Security Policy or Security Context Constraints. 
Different {{agent}} {integrations} might require different settings set in their PSP/[SCC](k8s-openshift-agent.md). +To deploy {{agent}} in clusters with the Pod Security Policy admission controller enabled, or in [OpenShift](k8s-openshift-agent.md) clusters, you might need to grant additional permissions to the Service Account used by the {{agent}} Pods. Those Service Accounts must be bound to a Role or ClusterRole that has `use` permission for the required Pod Security Policy or Security Context Constraints. Different {{agent}} {{integrations}} might require different settings set in their PSP/[SCC](k8s-openshift-agent.md). ## Customize {{fleet-server}} Service [k8s-elastic-agent-fleet-configuration-customize-fleet-server-service] diff --git a/deploy-manage/deploy/cloud-on-k8s/k8s_prerequisites.md b/deploy-manage/deploy/cloud-on-k8s/k8s_prerequisites.md index 3521f49c11..4eec0fb27d 100644 --- a/deploy-manage/deploy/cloud-on-k8s/k8s_prerequisites.md +++ b/deploy-manage/deploy/cloud-on-k8s/k8s_prerequisites.md @@ -172,7 +172,7 @@ spec: podSelector: matchLabels: common.k8s.elastic.co/type: elasticsearch - # [Optional] Restrict to a single {es} cluster named hulk. + # [Optional] Restrict to a single {{es}} cluster named hulk. # elasticsearch.k8s.elastic.co/cluster-name=hulk - ports: - port: 53 diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 21d7110a97..118c7f4d74 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -202,7 +202,7 @@ To remove the containers and their network, run: # Remove the Elastic network docker network rm elastic -# Remove {es} containers +# Remove {{es}} containers docker rm es01 docker rm es02 diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md index 2dbbde7632..066b87baaa 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-zip-on-windows.md @@ -10,7 +10,7 @@ mapped_pages: This package contains both free and subscription features. [Start a 30-day trial](https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html) to try out all of the features. ::::{note} -On Windows the {{es}} {ml} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. +On Windows the {{es}} {{ml}} feature requires the Microsoft Universal C Runtime library. This is built into Windows 10, Windows Server 2016 and more recent versions of Windows. For older versions of Windows it can be installed via Windows Update, or from a [separate download](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows). If you cannot install the Microsoft Universal C Runtime library you can still use the rest of {{es}} if you disable the {{ml}} feature. 
:::: diff --git a/deploy-manage/deploy/self-managed/install-with-docker.md b/deploy-manage/deploy/self-managed/install-with-docker.md index c6061ec2c3..9ddae78037 100644 --- a/deploy-manage/deploy/self-managed/install-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-with-docker.md @@ -151,7 +151,7 @@ To remove the containers and their network, run: # Remove the Elastic network docker network rm elastic -# Remove the {es} container +# Remove the {{es}} container docker rm es01 # Remove the {kib} container diff --git a/deploy-manage/monitor/monitoring-data/kibana-alerts.md b/deploy-manage/monitor/monitoring-data/kibana-alerts.md index 5c22ce1d39..3c2ebf373c 100644 --- a/deploy-manage/monitor/monitoring-data/kibana-alerts.md +++ b/deploy-manage/monitor/monitoring-data/kibana-alerts.md @@ -12,7 +12,7 @@ applies: # Kibana alerts [kibana-alerts] -The {{stack}} {monitor-features} provide [Alerting rules](../../../explore-analyze/alerts-cases/alerts.md) out-of-the box to notify you of potential issues in the {{stack}}. These rules are preconfigured based on the best practices recommended by Elastic. However, you can tailor them to meet your specific needs. +The {{stack}} {{monitor-features}} provide [Alerting rules](../../../explore-analyze/alerts-cases/alerts.md) out of the box to notify you of potential issues in the {{stack}}. These rules are preconfigured based on the best practices recommended by Elastic. However, you can tailor them to meet your specific needs. :::{image} ../../../images/kibana-monitoring-kibana-alerting-notification.png :alt: {{kib}} alerting notifications in {stack-monitor-app} :width: 100% ::: When you open **{{stack-monitor-app}}** for the first time, you will be asked to acknowledge the creation of these default rules. They are initially configured to detect and notify on various conditions across your monitored clusters. You can view notifications for: **Cluster health**, **Resource utilization**, and **Errors and exceptions** for {{es}} in real time. ::::{note} -The default {{watcher}} based "cluster alerts" for {{stack-monitor-app}} have been recreated as rules in {{kib}} {alert-features}. For this reason, the existing {{watcher}} email action `monitoring.cluster_alerts.email_notifications.email_address` no longer works. The default action for all {{stack-monitor-app}} rules is to write to {{kib}} logs and display a notification in the UI. +The default {{watcher}} based "cluster alerts" for {{stack-monitor-app}} have been recreated as rules in {{kib}} {{alert-features}}. For this reason, the existing {{watcher}} email action `monitoring.cluster_alerts.email_notifications.email_address` no longer works. The default action for all {{stack-monitor-app}} rules is to write to {{kib}} logs and display a notification in the UI. :::: diff --git a/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md b/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md index fbd94e37bd..c6576df07e 100644 --- a/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md +++ b/deploy-manage/monitor/monitoring-data/monitor-troubleshooting.md @@ -12,7 +12,7 @@ applies: # Troubleshooting [monitor-troubleshooting] -Use the information in this section to troubleshoot common problems and find answers for frequently asked questions related to the {{kib}} {monitor-features}.
+Use the information in this section to troubleshoot common problems and find answers for frequently asked questions related to the {{kib}} {{monitor-features}}. ## Cannot view the cluster because the license information is invalid [_cannot_view_the_cluster_because_the_license_information_is_invalid] diff --git a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md index 364b99dfa6..72c8740869 100644 --- a/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md +++ b/deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md @@ -10,7 +10,7 @@ applies: # Visualizing monitoring data [xpack-monitoring] -The {{kib}} {monitor-features} serve two separate purposes: +The {{kib}} {{monitor-features}} serve two separate purposes: 1. To visualize monitoring data from across the {{stack}}. You can view health and performance data for {{es}}, {{ls}}, {{ents}}, APM, and Beats in real time, as well as analyze past performance. 2. To monitor {{kib}} itself and route that data to the monitoring cluster. diff --git a/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md b/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md index 1ef1a04866..95d1639190 100644 --- a/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md +++ b/deploy-manage/monitor/stack-monitoring/collecting-log-data-with-filebeat.md @@ -26,7 +26,7 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst 2. Identify which logs you want to monitor. - The {{filebeat}} {es} module can handle [audit logs](../logging-configuration/logfile-audit-output.md), [deprecation logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md#deprecation-logging), [gc logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#gc-logging), [server logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md), and [slow logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html). For more information about the location of your {{es}} logs, see the [path.logs](../../deploy/self-managed/important-settings-configuration.md#path-settings) setting. + The {{filebeat}} {{es}} module can handle [audit logs](../logging-configuration/logfile-audit-output.md), [deprecation logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md#deprecation-logging), [gc logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#gc-logging), [server logs](../logging-configuration/elasticsearch-log4j-configuration-self-managed.md), and [slow logs](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html). For more information about the location of your {{es}} logs, see the [path.logs](../../deploy/self-managed/important-settings-configuration.md#path-settings) setting. ::::{important} If there are both structured (`*.json`) and unstructured (plain text) versions of the logs, you must use the structured logs. Otherwise, they might not appear in the appropriate context in {{kib}}. @@ -54,11 +54,11 @@ If you’re using {{agent}}, do not deploy {{filebeat}} for log collection. Inst If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`. 
::::{important} - The {{es}} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). + The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). :::: - If {{es}} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{filebeat}} can send metrics successfully. + If {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{filebeat}} can send metrics successfully. For more information about these configuration options, see [Configure the {{es}} output](https://www.elastic.co/guide/en/beats/filebeat/current/elasticsearch-output.html). diff --git a/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md b/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md index 753fefcfe2..7e609801fa 100644 --- a/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md +++ b/deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md @@ -93,11 +93,11 @@ Want to use {{agent}} instead? Refer to [Collecting monitoring data with {{agent If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`. ::::{important} - The {{es}} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). + The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). :::: - If {{es}} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: + If {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: 1. Create a user on the monitoring cluster that has the [`remote_monitoring_agent` built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, use the [`remote_monitoring_user` built-in user](../../users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file. diff --git a/deploy-manage/monitor/stack-monitoring/es-http-exporter.md b/deploy-manage/monitor/stack-monitoring/es-http-exporter.md index 5d2bef6d3c..6bf7dd4a8c 100644 --- a/deploy-manage/monitor/stack-monitoring/es-http-exporter.md +++ b/deploy-manage/monitor/stack-monitoring/es-http-exporter.md @@ -15,7 +15,7 @@ If you have previously configured legacy collection methods, you should migrate :::: -The `http` exporter is the preferred exporter in the {{es}} {monitor-features} because it enables the use of a separate monitoring cluster. 
As a secondary benefit, it avoids using a production cluster node as a coordinating node for indexing monitoring data because all requests are HTTP requests to the monitoring cluster. +The `http` exporter is the preferred exporter in the {{es}} {{monitor-features}} because it enables the use of a separate monitoring cluster. As a secondary benefit, it avoids using a production cluster node as a coordinating node for indexing monitoring data because all requests are HTTP requests to the monitoring cluster. The `http` exporter uses the low-level {{es}} REST Client, which enables it to send its data to any {{es}} cluster it can access through the network. Its requests make use of the [`filter_path`](https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#common-options-response-filtering) parameter to reduce bandwidth whenever possible, which helps to ensure that communications between the production and monitoring clusters are as lightweight as possible. @@ -46,7 +46,7 @@ xpack.monitoring.exporters: 1. A `local` exporter defined explicitly whose arbitrary name is `my_local`. 2. An `http` exporter defined whose arbitrary name is `my_remote`. This name uniquely defines the exporter but is otherwise unused. 3. `host` is a required setting for `http` exporters. It must specify the HTTP port rather than the transport port. The default port value is `9200`. -4. User authentication for those using {{stack}} {security-features} or some other form of user authentication protecting the cluster. +4. User authentication for those using {{stack}} {{security-features}} or some other form of user authentication protecting the cluster. 5. See [HTTP exporter settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#http-exporter-settings) for all TLS/SSL settings. If not supplied, the default node-level TLS/SSL settings are used. 6. Optional base path to prefix any outgoing request with in order to work with proxies. 7. Arbitrary key/value pairs to define as headers to send with every request. The array-based key/value format sends one header per value. diff --git a/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md b/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md index d0f899a27c..4c5f954263 100644 --- a/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md +++ b/deploy-manage/monitor/stack-monitoring/es-legacy-collection-methods.md @@ -29,7 +29,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 1. Verify that the `xpack.monitoring.elasticsearch.collection.enabled` setting is `true`, which is its default value, on each node in the cluster. ::::{note} - You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. + You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {{security-features}} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. :::: @@ -38,7 +38,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 2. Set the `xpack.monitoring.collection.enabled` setting to `true` on each node in the cluster. 
By default, it is disabled (`false`). ::::{note} - You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. + You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {{security-features}} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. :::: @@ -78,7 +78,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). By default, the data is stored on the same cluster by using a [`local` exporter](es-local-exporter.md). Alternatively, you can use an [`http` exporter](es-http-exporter.md) to send data to a separate *monitoring cluster*. ::::{important} - The {{es}} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). + The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one [ingest node](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). :::: diff --git a/deploy-manage/monitor/stack-monitoring/es-local-exporter.md b/deploy-manage/monitor/stack-monitoring/es-local-exporter.md index 89c175e7ff..2e4c116b93 100644 --- a/deploy-manage/monitor/stack-monitoring/es-local-exporter.md +++ b/deploy-manage/monitor/stack-monitoring/es-local-exporter.md @@ -36,7 +36,7 @@ For the `local` exporter, all setup occurs only on the elected master node. This The elected master node is the only node to set up resources for the `local` exporter. Therefore all other nodes wait for the resources to be set up before indexing any monitoring data from their own collectors. Each of these nodes logs a message indicating that they are waiting for the resources to be set up. -One benefit of the `local` exporter is that it lives within the cluster and therefore no extra configuration is required when the cluster is secured with {{stack}} {security-features}. All operations, including indexing operations, that occur from a `local` exporter make use of the internal transport mechanisms within {{es}}. This behavior enables the exporter to be used without providing any user credentials when {{security-features}} are enabled. +One benefit of the `local` exporter is that it lives within the cluster and therefore no extra configuration is required when the cluster is secured with {{stack}} {{security-features}}. All operations, including indexing operations, that occur from a `local` exporter make use of the internal transport mechanisms within {{es}}. This behavior enables the exporter to be used without providing any user credentials when {{security-features}} are enabled. For more information about the configuration options for the `local` exporter, see [Local exporter settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#local-exporter-settings). 
diff --git a/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md b/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md index 70e5213489..a37d192689 100644 --- a/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md +++ b/deploy-manage/monitor/stack-monitoring/es-monitoring-collectors.md @@ -17,7 +17,7 @@ If you have previously configured legacy collection methods, you should migrate Collectors, as their name implies, collect things. Each collector runs once for each collection interval to obtain data from the public APIs in {{es}} and {{xpack}} that it chooses to monitor. When the data collection is finished, the data is handed in bulk to the [exporters](es-monitoring-exporters.md) to be sent to the monitoring clusters. Regardless of the number of exporters, each collector only runs once per collection interval. -There is only one collector per data type gathered. In other words, for any monitoring document that is created, it comes from a single collector rather than being merged from multiple collectors. The {{es}} {monitor-features} currently have a few collectors because the goal is to minimize overlap between them for optimal performance. +There is only one collector per data type gathered. In other words, for any monitoring document that is created, it comes from a single collector rather than being merged from multiple collectors. The {{es}} {{monitor-features}} currently have a few collectors because the goal is to minimize overlap between them for optimal performance. Each collector can create zero or more monitoring documents. For example, the `index_stats` collector collects all index statistics at the same time to avoid many unnecessary calls. @@ -30,7 +30,7 @@ Each collector can create zero or more monitoring documents. For example, the `i | Jobs | `job_stats` | Gathers details about all machine learning job statistics (for example, `GET /_ml/anomaly_detectors/_stats`). This information only needs to be collected once, so it is collected on the *elected* master node. However, for the master node to be able to perform the collection, the master node must have `xpack.ml.enabled` set to true (default) and a license level that supports {{ml}}. | | Node Stats | `node_stats` | Gathers details about the running node, such as memory utilization and CPU usage (for example, `GET /_nodes/_local/stats`). This runs on *every* node with {{monitor-features}} enabled. One common failure results in the timeout of the node stats request due to too many segment files. As a result, the collector spends too much time waiting for the file system stats to be calculated until it finally times out. A single `node_stats` document is created per collection. This is collected per node to help to discover issues with nodes communicating with each other, but not with the monitoring cluster (for example, intermittent network issues or memory pressure). | -The {{es}} {monitor-features} use a single threaded scheduler to run the collection of {{es}} monitoring data by all of the appropriate collectors on each node. This scheduler is managed locally by each node and its interval is controlled by specifying the `xpack.monitoring.collection.interval`, which defaults to 10 seconds (`10s`), at either the node or cluster level. +The {{es}} {{monitor-features}} use a single threaded scheduler to run the collection of {{es}} monitoring data by all of the appropriate collectors on each node. 
This scheduler is managed locally by each node and its interval is controlled by specifying the `xpack.monitoring.collection.interval`, which defaults to 10 seconds (`10s`), at either the node or cluster level. Fundamentally, each collector works on the same principle. Per collection interval, each collector is checked to see whether it should run and then the appropriate collectors run. The failure of an individual collector does not impact any other collector. @@ -48,14 +48,14 @@ For more information about the configuration options for the collectors, see [Mo ## Collecting data from across the Elastic Stack [es-monitoring-stack] -{{es}} {monitor-features} also receive monitoring data from other parts of the Elastic Stack. In this way, it serves as an unscheduled monitoring data collector for the stack. +{{es}} {{monitor-features}} also receive monitoring data from other parts of the Elastic Stack. In this way, it serves as an unscheduled monitoring data collector for the stack. By default, data collection is disabled. {{es}} monitoring data is not collected and all monitoring data from other sources such as {{kib}}, Beats, and Logstash is ignored. You must set `xpack.monitoring.collection.enabled` to `true` to enable the collection of monitoring data. See [Monitoring settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html). Once data is received, it is forwarded to the exporters to be routed to the monitoring cluster like all monitoring data. ::::{warning} -Because this stack-level "collector" lives outside of the collection interval of {{es}} {monitor-features}, it is not impacted by the `xpack.monitoring.collection.interval` setting. Therefore, data is passed to the exporters whenever it is received. This behavior can result in indices for {{kib}}, Logstash, or Beats being created somewhat unexpectedly. +Because this stack-level "collector" lives outside of the collection interval of {{es}} {{monitor-features}}, it is not impacted by the `xpack.monitoring.collection.interval` setting. Therefore, data is passed to the exporters whenever it is received. This behavior can result in indices for {{kib}}, Logstash, or Beats being created somewhat unexpectedly. :::: diff --git a/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md b/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md index 63043510f4..6c4ace2774 100644 --- a/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md +++ b/deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md @@ -20,7 +20,7 @@ The purpose of exporters is to take data collected from any Elastic Stack source There are two types of exporters in {{es}}: `local` -: The default exporter used by {{es}} {monitor-features}. This exporter routes data back into the *same* cluster. See [Local exporters](es-local-exporter.md). +: The default exporter used by {{es}} {{monitor-features}}. This exporter routes data back into the *same* cluster. See [Local exporters](es-local-exporter.md). `http` : The preferred exporter, which you can use to route data into any supported {{es}} cluster accessible via HTTP. Production environments should always use a separate monitoring cluster. See [HTTP exporters](es-http-exporter.md). @@ -78,7 +78,7 @@ The templates are ordinary {{es}} templates that control the default settings an By default, monitoring indices are created daily (for example, `.monitoring-es-6-2017.08.26`). 
You can change the default date suffix for monitoring indices with the `index.name.time_format` setting. You can use this setting to control how frequently monitoring indices are created by a specific `http` exporter. You cannot use this setting with `local` exporters. For more information, see [HTTP exporter settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html#http-exporter-settings). ::::{warning} -Some users create their own templates that match *all* index patterns, which therefore impact the monitoring indices that get created. It is critical that you do not disable `_source` storage for the monitoring indices. If you do, {{kib}} {monitor-features} do not work and you cannot visualize monitoring data for your cluster. +Some users create their own templates that match *all* index patterns, which therefore impact the monitoring indices that get created. It is critical that you do not disable `_source` storage for the monitoring indices. If you do, {{kib}} {{monitor-features}} do not work and you cannot visualize monitoring data for your cluster. :::: diff --git a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md index 79d59b7427..c8761eb358 100644 --- a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md +++ b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md @@ -61,7 +61,7 @@ If you use a separate monitoring cluster to store the monitoring data, it is str 7. Go to the **Stack Monitoring** page using the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). - If data collection is disabled, you are prompted to turn on data collection. If {{es}} {security-features} are enabled, you must have `manage` cluster privileges to turn on data collection. + If data collection is disabled, you are prompted to turn on data collection. If {{es}} {{security-features}} are enabled, you must have `manage` cluster privileges to turn on data collection. ::::{note} If you are using a separate monitoring cluster, you do not need to turn on data collection. The dashboards appear when there is data in the monitoring cluster. diff --git a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md index f9c303d813..f5aab0743d 100644 --- a/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md +++ b/deploy-manage/monitor/stack-monitoring/kibana-monitoring-metricbeat.md @@ -33,7 +33,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 3. Set the `xpack.monitoring.collection.enabled` setting to `true` on each node in the production cluster. By default, it is disabled (`false`). ::::{note} - You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. + You can specify this setting in either the `elasticsearch.yml` on each node or across the cluster as a dynamic cluster setting. If {{es}} {{security-features}} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. :::: @@ -65,7 +65,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). 
For more information, see [Monitoring settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/monitoring-settings.html) and [Cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings). 4. [Install {{metricbeat}}](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-installation-configuration.html) on the same server as {{kib}}. -5. Enable the {{kib}} {xpack} module in {{metricbeat}}.
+5. Enable the {{kib}} {{xpack}} module in {{metricbeat}}.
For example, to enable the default configuration in the `modules.d` directory, run the following command: @@ -75,7 +75,7 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). For more information, see [Specify which modules to run](https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-metricbeat.html) and [{{kib}} module](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-kibana.html). -6. Configure the {{kib}} {xpack} module in {{metricbeat}}.
+6. Configure the {{kib}} {{xpack}} module in {{metricbeat}}.
The `modules.d/kibana-xpack.yml` file contains the following settings: @@ -132,11 +132,11 @@ To learn about monitoring in general, see [Monitor a cluster](../../monitor.md). If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`. ::::{important} - The {{es}} {monitor-features} use ingest pipelines. The cluster that stores the monitoring data must have at least one node with the `ingest` role. + The {{es}} {{monitor-features}} use ingest pipelines. The cluster that stores the monitoring data must have at least one node with the `ingest` role. :::: - If the {{es}} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: + If the {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: 1. Create a user on the monitoring cluster that has the `remote_monitoring_agent` [built-in role](../../users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, use the `remote_monitoring_user` [built-in user](../../users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file. diff --git a/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md b/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md index c3296bfedb..908fe4cfbd 100644 --- a/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md +++ b/deploy-manage/security/enabling-cipher-suites-for-stronger-encryption.md @@ -9,7 +9,7 @@ The TLS and SSL protocols use a cipher suite that determines the strength of enc The *Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files* enable the use of additional cipher suites for Java in a separate JAR file that you need to add to your Java installation. You can download this JAR file from Oracle’s [download page](http://www.oracle.com/technetwork/java/javase/downloads/index.md). The *JCE Unlimited Strength Jurisdiction Policy Files`* are required for encryption with key lengths greater than 128 bits, such as 256-bit AES encryption. -After installation, all cipher suites in the JCE are available for use but requires configuration in order to use them. To enable the use of stronger cipher suites with {{es}} {security-features}, configure the [`cipher_suites` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ssl-tls-settings). +After installation, all cipher suites in the JCE are available for use but require configuration in order to use them. To enable the use of stronger cipher suites with {{es}} {{security-features}}, configure the [`cipher_suites` parameter](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ssl-tls-settings). ::::{note} The *JCE Unlimited Strength Jurisdiction Policy Files* must be installed on all nodes in the cluster to establish an improved level of encryption strength. 
diff --git a/deploy-manage/security/httprest-clients-security.md b/deploy-manage/security/httprest-clients-security.md index c9376cd9a3..38892be85f 100644 --- a/deploy-manage/security/httprest-clients-security.md +++ b/deploy-manage/security/httprest-clients-security.md @@ -5,7 +5,7 @@ mapped_pages: # HTTP/REST clients and security [http-clients] -The {{es}} {security-features} work with standard HTTP [basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) headers to authenticate users. Since Elasticsearch is stateless, this header must be sent with every request: +The {{es}} {{security-features}} work with standard HTTP [basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) headers to authenticate users. Since Elasticsearch is stateless, this header must be sent with every request: ```shell Authorization: Basic <1> diff --git a/deploy-manage/security/secure-clients-integrations.md b/deploy-manage/security/secure-clients-integrations.md index e4520f4e66..2f7212cb1c 100644 --- a/deploy-manage/security/secure-clients-integrations.md +++ b/deploy-manage/security/secure-clients-integrations.md @@ -7,7 +7,7 @@ mapped_pages: You will need to update the configuration for several [clients](httprest-clients-security.md) to work with a secured {{es}} cluster. -The {{es}} {security-features} enable you to secure your {{es}} cluster. But {{es}} itself is only one product within the {{stack}}. It is often the case that other products in the {{stack}} are connected to the cluster and therefore need to be secured as well, or at least communicate with the cluster in a secured way: +The {{es}} {{security-features}} enable you to secure your {{es}} cluster. But {{es}} itself is only one product within the {{stack}}. It is often the case that other products in the {{stack}} are connected to the cluster and therefore need to be secured as well, or at least communicate with the cluster in a secured way: * [Apache Hadoop](https://www.elastic.co/guide/en/elasticsearch/reference/current/hadoop.html) * [Auditbeat](https://www.elastic.co/guide/en/beats/auditbeat/current/securing-auditbeat.html) diff --git a/deploy-manage/security/set-up-minimal-security.md b/deploy-manage/security/set-up-minimal-security.md index 71e1f5344f..4a6f6e4db9 100644 --- a/deploy-manage/security/set-up-minimal-security.md +++ b/deploy-manage/security/set-up-minimal-security.md @@ -10,13 +10,13 @@ mapped_pages: ::::{important} -You only need to complete the following steps if you’re running an existing, unsecured cluster and want to enable the {{es}} {security-features}. +You only need to complete the following steps if you’re running an existing, unsecured cluster and want to enable the {{es}} {{security-features}}. :::: In {{es}} 8.0 and later, security is [enabled automatically](../deploy/self-managed/installing-elasticsearch.md) when you start {{es}} for the first time. -If you’re running an existing {{es}} cluster where security is disabled, you can manually enable the {{es}} {security-features} and then create passwords for built-in users. You can add more users later, but using the built-in users simplifies the process of enabling security for your cluster. +If you’re running an existing {{es}} cluster where security is disabled, you can manually enable the {{es}} {{security-features}} and then create passwords for built-in users. You can add more users later, but using the built-in users simplifies the process of enabling security for your cluster. 
::::{important} The minimal security scenario is not sufficient for [production mode](../deploy/self-managed/bootstrap-checks.md#dev-vs-prod-mode) clusters. If your cluster has multiple nodes, you must enable minimal security and then [configure Transport Layer Security (TLS)](secure-cluster-communications.md) between nodes. diff --git a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md index 78dc01d03a..0fe0f2b104 100644 --- a/deploy-manage/tools/snapshot-and-restore/create-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/create-snapshots.md @@ -57,7 +57,7 @@ The guide also provides tips for creating dedicated cluster state snapshots and ### {{slm-init}} security [slm-security] -The following [cluster privileges](../../users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-cluster) control access to the {{slm-init}} actions when {{es}} {security-features} are enabled: +The following [cluster privileges](../../users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md#privileges-list-cluster) control access to the {{slm-init}} actions when {{es}} {{security-features}} are enabled: `manage_slm` : Allows a user to perform all {{slm-init}} actions, including creating and updating policies and starting and stopping {{slm-init}}. diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/authentication-realms.md b/deploy-manage/users-roles/cluster-or-deployment-auth/authentication-realms.md index acb90a3f46..d015d658c9 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/authentication-realms.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/authentication-realms.md @@ -43,7 +43,7 @@ The {{security-features}} also support custom realms. If you need to integrate w Realm types can roughly be classified in two categories: Internal -: Realms that are internal to Elasticsearch and don’t require any communication with external parties. They are fully managed by the {{stack}} {security-features}. There can only be a maximum of one configured realm per internal realm type. The {{security-features}} provide two internal realm types: `native` and `file`. +: Realms that are internal to Elasticsearch and don’t require any communication with external parties. They are fully managed by the {{stack}} {{security-features}}. There can only be a maximum of one configured realm per internal realm type. The {{security-features}} provide two internal realm types: `native` and `file`. External : Realms that require interaction with parties/components external to {{es}}, typically, with enterprise grade identity management systems. Unlike internal realms, there can be as many external realms as one would like - each with its own unique name and configuration. The {{security-features}} provide the following external realm types: `ldap`, `active_directory`, `saml`, `kerberos`, and `pki`. 
diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/authorization-plugins.md b/deploy-manage/users-roles/cluster-or-deployment-auth/authorization-plugins.md index 9a6d9e68e5..3666ac2b35 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/authorization-plugins.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/authorization-plugins.md @@ -5,7 +5,7 @@ mapped_pages: # Authorization plugins [custom-roles-authorization] -If you need to retrieve user roles from a system not supported out-of-the-box or if the authorization system that is provided by the {{es}} {security-features} does not meet your needs, a SPI loaded security extension can be implemented to customize role retrieval and/or the authorization system. The SPI loaded security extension is part of an ordinary elasticsearch plugin. +If you need to retrieve user roles from a system not supported out-of-the-box or if the authorization system that is provided by the {{es}} {{security-features}} does not meet your needs, an SPI loaded security extension can be implemented to customize role retrieval and/or the authorization system. The SPI loaded security extension is part of an ordinary elasticsearch plugin. ## Implementing a custom roles provider [implementing-custom-roles-provider] diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md b/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md index cefbfb99e7..867972e308 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/controlling-user-cache.md @@ -17,7 +17,7 @@ PKI and JWT realms do not cache user credentials, but do cache the resolved user :::: -The cached user credentials are hashed in memory. By default, the {{es}} {security-features} use a salted `sha-256` hash algorithm. You can use a different hashing algorithm by setting the `cache.hash_algo` realm settings. See [User cache and password hash algorithms](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#hashing-settings). +The cached user credentials are hashed in memory. By default, the {{es}} {{security-features}} use a salted `sha-256` hash algorithm. You can use a different hashing algorithm by setting the `cache.hash_algo` realm settings. See [User cache and password hash algorithms](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#hashing-settings). ## Evicting users from the cache [cache-eviction-api] diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/custom.md b/deploy-manage/users-roles/cluster-or-deployment-auth/custom.md index f1f689edbb..2221e27f05 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/custom.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/custom.md @@ -5,7 +5,7 @@ mapped_pages: # Custom [custom-realms] -If you are using an authentication system that is not supported out-of-the-box by the {{es}} {security-features}, you can create a custom realm to interact with it to authenticate users. You implement a custom realm as an SPI loaded security extension as part of an ordinary elasticsearch plugin. +If you are using an authentication system that is not supported out-of-the-box by the {{es}} {{security-features}}, you can create a custom realm to interact with it to authenticate users. You implement a custom realm as an SPI loaded security extension as part of an ordinary elasticsearch plugin. 
## Implementing a custom realm [implementing-custom-realm] @@ -37,7 +37,7 @@ To package your custom realm as a plugin: } ``` - The `getAuthenticationFailureHandler` method is used to optionally provide a custom `AuthenticationFailureHandler`, which will control how the {{es}} {security-features} respond in certain authentication failure events. + The `getAuthenticationFailureHandler` method is used to optionally provide a custom `AuthenticationFailureHandler`, which will control how the {{es}} {{security-features}} respond in certain authentication failure events. ```java @Override diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/granting-privileges-for-data-streams-aliases.md b/deploy-manage/users-roles/cluster-or-deployment-auth/granting-privileges-for-data-streams-aliases.md index e3cc720c37..ab18fad749 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/granting-privileges-for-data-streams-aliases.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/granting-privileges-for-data-streams-aliases.md @@ -5,7 +5,7 @@ mapped_pages: # Granting privileges for data streams and aliases [securing-aliases] -{{es}} {security-features} allow you to secure operations executed against [data streams](../../../manage-data/data-store/index-types/data-streams.md) and [aliases](../../../manage-data/data-store/aliases.md). +{{es}} {{security-features}} allow you to secure operations executed against [data streams](../../../manage-data/data-store/index-types/data-streams.md) and [aliases](../../../manage-data/data-store/aliases.md). ## Data stream privileges [data-stream-privileges] diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md b/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md index 78da3d93ae..eef6143b7c 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/realm-chains.md @@ -7,7 +7,7 @@ mapped_pages: [Realms](authentication-realms.md) live within a *realm chain*. It is essentially a prioritized list of configured realms (typically of various types). Realms are consulted in ascending order (that is to say, the realm with the lowest `order` value is consulted first). You must make sure each configured realm has a distinct `order` setting. In the event that two or more realms have the same `order`, the node will fail to start. -During the authentication process, {{stack}} {security-features} consult and try to authenticate the request one realm at a time. Once one of the realms successfully authenticates the request, the authentication is considered to be successful. The authenticated user is associated with the request, which then proceeds to the authorization phase. If a realm cannot authenticate the request, the next realm in the chain is consulted. If all realms in the chain cannot authenticate the request, the authentication is considered to be unsuccessful and an authentication error is returned (as HTTP status code `401`). +During the authentication process, {{stack}} {{security-features}} consult and try to authenticate the request one realm at a time. Once one of the realms successfully authenticates the request, the authentication is considered to be successful. The authenticated user is associated with the request, which then proceeds to the authorization phase. If a realm cannot authenticate the request, the next realm in the chain is consulted. 
If all realms in the chain cannot authenticate the request, the authentication is considered to be unsuccessful and an authentication error is returned (as HTTP status code `401`). ::::{note} Some systems (e.g. Active Directory) have a temporary lock-out period after several successive failed login attempts. If the same username exists in multiple realms, unintentional account lockouts are possible. For more information, see [Users are frequently locked out of Active Directory](../../../troubleshoot/elasticsearch/security/trouble-shoot-active-directory.md). diff --git a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md index 7d4909257d..9a141bca26 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md +++ b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md @@ -108,7 +108,7 @@ A rule consists of conditions, actions, and a schedule. When conditions are met, ## Differences from {{watcher}} [alerting-concepts-differences] -[{{watcher}}](../../../explore-analyze/alerts-cases/watcher.md) and the {{kib}} {alert-features} are both used to detect conditions and can trigger actions in response, but they are completely independent alerting systems. +[{{watcher}}](../../../explore-analyze/alerts-cases/watcher.md) and the {{kib}} {{alert-features}} are both used to detect conditions and can trigger actions in response, but they are completely independent alerting systems. This section will clarify some of the important differences in the function and intent of the two systems. diff --git a/explore-analyze/alerts-cases/alerts/alerting-setup.md b/explore-analyze/alerts-cases/alerts/alerting-setup.md index 8e7555b390..7b9cf3f67a 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-setup.md +++ b/explore-analyze/alerts-cases/alerts/alerting-setup.md @@ -20,7 +20,7 @@ If you are using an **on-premises** {{stack}} deployment: If you are using an **on-premises** {{stack}} deployment with [**security**](../../../deploy-manage/security.md): -* If you are unable to access {{kib}} {alert-features}, ensure that you have not [explicitly disabled API keys](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings). +* If you are unable to access {{kib}} {{alert-features}}, ensure that you have not [explicitly disabled API keys](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#api-key-service-settings). The alerting framework uses queries that require the `search.allow_expensive_queries` setting to be `true`. See the scripts [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-query.html#_allow_expensive_queries_4). diff --git a/explore-analyze/alerts-cases/alerts/view-alerts.md b/explore-analyze/alerts-cases/alerts/view-alerts.md index abc2b2f598..3660ef5d10 100644 --- a/explore-analyze/alerts-cases/alerts/view-alerts.md +++ b/explore-analyze/alerts-cases/alerts/view-alerts.md @@ -18,7 +18,7 @@ You can manage the alerts for each rule in **{{stack-manage-app}}** > **{{rules- ::: ::::{note} -You must have the appropriate {{kib}} {alert-features} and index privileges to view alerts. Refer to [Alerting security requirements](alerting-setup.md#alerting-security). +You must have the appropriate {{kib}} {{alert-features}} and index privileges to view alerts. Refer to [Alerting security requirements](alerting-setup.md#alerting-security). 
:::: diff --git a/explore-analyze/alerts-cases/watcher/actions-webhook.md b/explore-analyze/alerts-cases/watcher/actions-webhook.md index 278fac2478..2bbaad7df0 100644 --- a/explore-analyze/alerts-cases/watcher/actions-webhook.md +++ b/explore-analyze/alerts-cases/watcher/actions-webhook.md @@ -68,10 +68,10 @@ You can use basic authentication when sending a request to a secured webservice. 1. The username and password for the user creating the issue ::::{note} -By default, both the username and the password are stored in the `.watches` index in plain text. When the {{es}} {security-features} are enabled, {{watcher}} can encrypt the password before storing it. +By default, both the username and the password are stored in the `.watches` index in plain text. When the {{es}} {{security-features}} are enabled, {{watcher}} can encrypt the password before storing it. :::: -You can also use PKI-based authentication when submitting requests to a cluster that has {{es}} {security-features} enabled. When you use PKI-based authentication instead of HTTP basic auth, you don’t need to store any authentication information in the watch itself. To use PKI-based authentication, you [configure the SSL key settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#ssl-notification-settings) for {{watcher}} in `elasticsearch.yml`. +You can also use PKI-based authentication when submitting requests to a cluster that has {{es}} {{security-features}} enabled. When you use PKI-based authentication instead of HTTP basic auth, you don’t need to store any authentication information in the watch itself. To use PKI-based authentication, you [configure the SSL key settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/notification-settings.html#ssl-notification-settings) for {{watcher}} in `elasticsearch.yml`. ## Query parameters [webhook-query-parameters] diff --git a/explore-analyze/alerts-cases/watcher/managing-watches.md b/explore-analyze/alerts-cases/watcher/managing-watches.md index 8c0e216e3f..0b4b37840e 100644 --- a/explore-analyze/alerts-cases/watcher/managing-watches.md +++ b/explore-analyze/alerts-cases/watcher/managing-watches.md @@ -22,7 +22,7 @@ mapped_pages: Currently, there is no dedicated API for listing the stored watches. However, since {{watcher}} stores its watches in the `.watches` index, you can list them by executing a search on this index. ::::{important} -You can only perform read actions on the `.watches` index. You must use the {{watcher}} APIs to create, update, and delete watches. If {{es}} {security-features} are enabled, we recommend you only grant users `read` privileges on the `.watches` index. +You can only perform read actions on the `.watches` index. You must use the {{watcher}} APIs to create, update, and delete watches. If {{es}} {{security-features}} are enabled, we recommend you only grant users `read` privileges on the `.watches` index. :::: For example, the following returns the first 100 watches: diff --git a/explore-analyze/machine-learning.md b/explore-analyze/machine-learning.md index ac7fe56d52..eb5da7647f 100644 --- a/explore-analyze/machine-learning.md +++ b/explore-analyze/machine-learning.md @@ -41,7 +41,7 @@ The {{ml-features}} that are available vary by project type: ## Synchronize saved objects [machine-learning-synchronize-saved-objects] -Before you can view your {{ml}} {dfeeds}, jobs, and trained models in {{kib}}, they must have saved objects.
For example, if you used APIs to create your jobs, wait for automatic synchronization or go to the **{{ml-app}}** page and click **Synchronize saved objects**. +Before you can view your {{ml}} {{dfeeds}}, jobs, and trained models in {{kib}}, they must have saved objects. For example, if you used APIs to create your jobs, wait for automatic synchronization or go to the **{{ml-app}}** page and click **Synchronize saved objects**. ## Export and import jobs [machine-learning-export-and-import-jobs] @@ -51,6 +51,6 @@ The exported file contains configuration details; it does not contain the {{ml}} There are some additional actions that you must take before you can successfully import and run your jobs: -* The {{data-sources}} that are used by {{anomaly-detect}} {dfeeds} and {{dfanalytics}} source indices must exist; otherwise, the import fails. +* The {{data-sources}} that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. * If your {{anomaly-jobs}} use custom rules with filter lists, the filter lists must exist; otherwise, the import fails. * If your {{anomaly-jobs}} were associated with calendars, you must create the calendar in the new environment and add your imported jobs to the calendar. \ No newline at end of file diff --git a/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md b/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md index 5d9814f363..c8e2dca49d 100644 --- a/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md +++ b/explore-analyze/machine-learning/anomaly-detection/anomaly-how-tos.md @@ -27,7 +27,7 @@ Though it is quite simple to analyze your data and provide quick {{ml}} results, ## {{anomaly-detect-cap}} examples in blog posts [anomaly-examples-blog-posts] -The blog posts listed below show how to get the most out of Elastic {{ml}} {anomaly-detect}. +The blog posts listed below show how to get the most out of Elastic {{ml}} {{anomaly-detect}}. * [Sizing for {{ml}} with {{es}}](https://www.elastic.co/blog/sizing-machine-learning-with-elasticsearch) * [Filtering input data to refine {{ml-jobs}}](https://www.elastic.co/blog/filtering-input-data-to-refine-machine-learning-jobs) @@ -37,7 +37,7 @@ The blog posts listed below show how to get the most out of Elastic {{ml}} {anom * [User annotations for Elastic {{ml}}](https://www.elastic.co/blog/augmenting-results-with-user-annotations-for-elastic-machine-learning) * [Custom {{es}} aggregations for {{ml-jobs}}](https://www.elastic.co/blog/custom-elasticsearch-aggregations-for-machine-learning-jobs) * [Analysing Linux auditd anomalies with Auditbeat and {{ml}}](https://www.elastic.co/blog/analysing-linux-auditd-anomalies-with-auditbeat-and-elastic-stack-machine-learning) -* [How to optimize {{es}} {ml} job configurations using job validation](https://www.elastic.co/blog/how-to-optimize-elasticsearch-machine-learning-job-configurations-using-job-validation) +* [How to optimize {{es}} {{ml}} job configurations using job validation](https://www.elastic.co/blog/how-to-optimize-elasticsearch-machine-learning-job-configurations-using-job-validation) * [Interpretability in {{ml}}: Identifying anomalies, influencers, and root causes](https://www.elastic.co/blog/interpretability-in-ml-identifying-anomalies-influencers-root-causes) There are also some examples in the {{ml}} folder in the [examples repository](https://github.com/elastic/examples). 
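Picking up the filter-list prerequisite from the import steps above, the following is a minimal sketch of creating a filter with the create filters API before importing jobs that reference it — the filter ID, description, and items are hypothetical:

```js
// Create a filter list that imported custom rules can reference
PUT _ml/filters/safe_domains
{
  "description": "Domains to exclude from anomaly results",
  "items": ["www.elastic.co", "localhost"]
}
```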
diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md b/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md index a0c7e6964c..1c0ce58e5e 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md @@ -8,7 +8,7 @@ mapped_pages: # API quick reference [ml-api-quickref] -All {{ml}} {anomaly-detect} endpoints have the following base: +All {{ml}} {{anomaly-detect}} endpoints have the following base: ```js /_ml/ ``` @@ -23,4 +23,4 @@ The main resources can be accessed with a variety of endpoints: * [`/results/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Access the results of an {{anomaly-job}} * [`/model_snapshots/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Manage model snapshots -For a full list, see [{{ml-cap}} {anomaly-detect} APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly). +For a full list, see [{{ml-cap}} {{anomaly-detect}} APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly). diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md index bd701641fd..e598f4cd09 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md @@ -57,7 +57,7 @@ When you click this custom URL in the anomalies table in {{kib}}, it opens up th ::::{tip} * The custom URL links in the anomaly tables use pop-ups. You must configure your web browser so that it does not block pop-up windows or create an exception for your {{kib}} URL. -* When creating a link to a {{kib}} dashboard, the URLs for dashboards can be very long. Be careful of typos, end of line characters, and URL encoding. Also ensure you use the appropriate index ID for the target {{kib}} {data-source}. +* When creating a link to a {{kib}} dashboard, the URLs for dashboards can be very long. Be careful of typos, end of line characters, and URL encoding. Also ensure you use the appropriate index ID for the target {{kib}} {{data-source}}. * If you use an influencer name for string substitution, keep in mind that it might not always be available in the analysis results and the URL is invalid in those cases. There is not always a statistically significant influencer for each anomaly. * The dates substituted for `$earliest$` and `$latest$` tokens are in ISO-8601 format and the target system must understand this format. * If the job performs an analysis against nested JSON fields, the tokens for string substitution can refer to these fields using dot notation. For example, `$cpu.total$`. diff --git a/explore-analyze/machine-learning/anomaly-detection/move-jobs.md b/explore-analyze/machine-learning/anomaly-detection/move-jobs.md index adf57b9962..9b513834ff 100644 --- a/explore-analyze/machine-learning/anomaly-detection/move-jobs.md +++ b/explore-analyze/machine-learning/anomaly-detection/move-jobs.md @@ -15,6 +15,6 @@ The exported file contains configuration details; it does not contain the {{ml}} There are some additional actions that you must take before you can successfully import and run your jobs: -1. 
The {{kib}} [{{data-sources}}](https://www.elastic.co/guide/en/kibana/current/data-views.html) that are used by {{anomaly-detect}} {dfeeds} and {{dfanalytics}} source indices must exist; otherwise, the import fails. +1. The {{kib}} [{{data-sources}}](https://www.elastic.co/guide/en/kibana/current/data-views.html) that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. 2. If your {{anomaly-jobs}} use [custom rules](ml-configuring-detector-custom-rules.md) with filter lists, the filter lists must exist; otherwise, the import fails. To create filter lists, use {{kib}} or the [create filters API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter). 3. If your {{anomaly-jobs}} were associated with [calendars](https://www.elastic.co/guide/en/machine-learning/current/ml-ad-run-jobs.html#ml-ad-calendars), you must create the calendar in the new environment and add your imported jobs to the calendar. Use {{kib}} or the [create calendars](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar), [add events to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events), and [add jobs to calendar](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) APIs. diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md index 30753ac18a..71306c3d04 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md @@ -193,13 +193,13 @@ For instance, suppose you have an online service and you would like to predict w {{infer-cap}} can be used as a processor specified in an [ingest pipeline](../../../manage-data/ingest/transform-enrich/ingest-pipelines.md). It uses a trained model to infer against the data that is being ingested in the pipeline. The model is used on the ingest node. {{infer-cap}} pre-processes the data by using the model and provides a prediction. After the process, the pipeline continues executing (if there are any other processors in the pipeline); finally, the new data together with the results is indexed into the destination index. -Check the [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) and [the {{ml}} {dfanalytics} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-processor.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. #### {{infer-cap}} aggregation [ml-inference-aggregation-class] {{infer-cap}} can also be used as a pipeline aggregation. You can reference a trained model in the aggregation to infer on the result field of the parent bucket aggregation. The {{infer}} aggregation uses the model on the results to provide a prediction. This aggregation enables you to run {{classification}} or {{reganalysis}} at search time. If you want to perform the analysis on a small set of data, this aggregation enables you to generate predictions without the need to set up a processor in the ingest pipeline.
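As a rough sketch of the ingest-pipeline approach described above — the pipeline name and `model_id` are placeholders for your own trained model:

```js
// Ingest pipeline that runs the inference processor on incoming documents
PUT _ingest/pipeline/my_inference_pipeline
{
  "processors": [
    {
      "inference": {
        "model_id": "my_trained_model",
        "target_field": "ml.inference"
      }
    }
  ]
}
```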
-Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {dfanalytics} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. ::::{note} If you use trained model aliases to reference your trained model in an {{infer}} processor or {{infer}} aggregation, you can replace your trained model with a new one without the need of updating the processor or the aggregation. Reassign the alias you used to a new trained model ID by using the [Create or update trained model aliases API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias). The new trained model needs to use the same type of {{dfanalytics}} as the old one. diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-concepts.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-concepts.md index a56f566ca8..1760e443ac 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-concepts.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-concepts.md @@ -8,7 +8,7 @@ mapped_pages: # Advanced concepts [ml-dfa-concepts] -This section explains the more complex concepts of the Elastic {{ml}} {dfanalytics} feature. +This section explains the more complex concepts of the Elastic {{ml}} {{dfanalytics}} feature. * [How {{dfanalytics-jobs}} work](ml-dfa-phases.md) * [Working with {{dfanalytics}} at scale](ml-dfa-scale.md) diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md index 3665b02465..0e278dea03 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md @@ -54,7 +54,7 @@ When you click this custom URL, it opens up the **Discover** page and displays s ::::{tip} * The custom URL links use pop-ups. You must configure your web browser so that it does not block pop-up windows or create an exception for your {{kib}} URL. -* When creating a link to a {{kib}} dashboard, the URLs for dashboards can be very long. Be careful of typos, end of line characters, and URL encoding. Also ensure you use the appropriate index ID for the target {{kib}} {data-source}. +* When creating a link to a {{kib}} dashboard, the URLs for dashboards can be very long. Be careful of typos, end of line characters, and URL encoding. Also ensure you use the appropriate index ID for the target {{kib}} {{data-source}}. * The dates substituted for `$earliest$` and `$latest$` tokens are in ISO-8601 format and the target system must understand this format. * If the job performs an analysis against nested JSON fields, the tokens for string substitution can refer to these fields using dot notation. For example, `$cpu.total$`. * {{es}} source data mappings might make it difficult for the query string to work. Test the custom URL before saving the job configuration to check that it works as expected, particularly when using string substitution. 
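To round out the tips above, here is one hedged way to attach such a custom URL to an existing {{anomaly-job}} with the update job API — the job ID, URL name, and URL value are hypothetical and would need the encoding cautions noted above:

```js
// Add a custom URL that substitutes the anomaly's time range into a dashboard link
POST _ml/anomaly_detectors/my_job/_update
{
  "custom_settings": {
    "custom_urls": [
      {
        "url_name": "Flight dashboard",
        "url_value": "dashboards#/view/my-dashboard?_g=(time:(from:'$earliest$',to:'$latest$'))"
      }
    ]
  }
}
```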
diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md index bb7b77fa1f..bd1f1e1584 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md @@ -145,7 +145,7 @@ Check the [{{infer}} processor](https://www.elastic.co/guide/en/elasticsearch/re {{infer-cap}} can also be used as a pipeline aggregation. You can reference a trained model in the aggregation to infer on the result field of the parent bucket aggregation. The {{infer}} aggregation uses the model on the results to provide a prediction. This aggregation enables you to run {{classification}} or {{reganalysis}} at search time. If you want to perform the analysis on a small set of data, this aggregation enables you to generate predictions without the need to set up a processor in the ingest pipeline. -Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {dfanalytics} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. +Check the [{{infer}} bucket aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-inference-bucket-aggregation.html) and [the {{ml}} {{dfanalytics}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-data-frame) to learn more. ::::{note} If you use trained model aliases to reference your trained model in an {{infer}} processor or {{infer}} aggregation, you can replace your trained model with a new one without the need of updating the processor or the aggregation. Reassign the alias you used to a new trained model ID by using the [Create or update trained model aliases API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias). The new trained model needs to use the same type of {{dfanalytics}} as the old one. @@ -220,7 +220,7 @@ The sample flight data is used in this example because it is easily accessible. To predict the number of minutes delayed for each flight: -1. Verify that your environment is set up properly to use {{ml-features}}. The {{stack}} {security-features} require a user that has authority to create and manage {{dfanalytics-jobs}}. See [Setup and security](../setting-up-machine-learning.md). +1. Verify that your environment is set up properly to use {{ml-features}}. The {{stack}} {{security-features}} require a user that has authority to create and manage {{dfanalytics-jobs}}. See [Setup and security](../setting-up-machine-learning.md). 2. Create a {{dfanalytics-job}}. You can use the wizard on the **{{ml-app}}** > **Data Frame Analytics** tab in {{kib}} or the [create {{dfanalytics-jobs}}](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics) API. 
:::{image} ../../../images/machine-learning-flights-regression-job-1.jpg diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md index 532367faa8..c1cf80a5d1 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md @@ -15,28 +15,28 @@ The purpose of {{feat-imp}} is to help you determine whether the predictions are You can see the average magnitude of the {{feat-imp}} values for each field across all the training data in {{kib}} or by using the [get trained model API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models). For example, {{kib}} shows the total feature importance for each field in {{regression}} or binary {{classanalysis}} results as follows: :::{image} ../../../images/machine-learning-flights-regression-total-importance.jpg -:alt: Total {{feat-imp}} values for a {{regression}} {dfanalytics-job} in {kib} +:alt: Total {{feat-imp}} values for a {{regression}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: If the {{classanalysis}} involves more than two classes, {{kib}} uses colors to show how the impact of each field varies by class. For example: :::{image} ../../../images/machine-learning-diamonds-classification-total-importance.png -:alt: Total {{feat-imp}} values for a {{classification}} {dfanalytics-job} in {kib} +:alt: Total {{feat-imp}} values for a {{classification}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: You can also examine the feature importance values for each individual prediction. In {{kib}}, you can see these values in JSON objects or decision plots. For {{reganalysis}}, each decision plot starts at a shared baseline, which is the average of the prediction values for all the data points in the training data set. When you add all of the feature importance values for a particular data point to that baseline, you arrive at the numeric prediction value. If a {{feat-imp}} value is negative, it reduces the prediction value. If a {{feat-imp}} value is positive, it increases the prediction value. For example: :::{image} ../../../images/machine-learning-flights-regression-decision-plot.png -:alt: Feature importance values for a {{regression}} {dfanalytics-job} in {kib} +:alt: Feature importance values for a {{regression}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: For {{classanalysis}}, the sum of the {{feat-imp}} values approximates the predicted logarithm of odds for each data point. The simplest way to understand {{feat-imp}} in the context of {{classanalysis}} is to look at the decision plots in {{kib}}. For each data point, there is a chart which shows the relative impact of each feature on the prediction probability for that class. This information helps you to understand which features reduce or increase the prediction probability.
For example: :::{image} ../../../images/machine-learning-flights-classification-decision-plot.png -:alt: A decision plot in {{kib}}for a {{classification}} {dfanalytics-job} +:alt: A decision plot in {{kib}} for a {{classification}} {{dfanalytics-job}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-e5.md b/explore-analyze/machine-learning/nlp/ml-nlp-e5.md index 2d99273c86..156536074b 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-e5.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-e5.md @@ -91,7 +91,7 @@ Alternatively, you can download and deploy the E5 model to an {{infer}} pipeline 1. In {{kib}}, navigate to **Search** > **Indices**. 2. Select the index from the list that has an {{infer}} pipeline in which you want to use E5. 3. Navigate to the **Pipelines** tab. -4. Under **{{ml-app}} {infer-cap} Pipelines**, click the **Deploy** button in the **Improve your results with E5** section to begin downloading the E5 model. This may take a few minutes depending on your network. +4. Under **{{ml-app}} {{infer-cap}} Pipelines**, click the **Deploy** button in the **Improve your results with E5** section to begin downloading the E5 model. This may take a few minutes depending on your network. :::{image} ../../../images/machine-learning-ml-nlp-deploy-e5-es.png :alt: Deploying E5 in Elasticsearch diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-elser.md b/explore-analyze/machine-learning/nlp/ml-nlp-elser.md index 4f1e061004..ea986aa6bc 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-elser.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-elser.md @@ -123,7 +123,7 @@ Alternatively, you can download and deploy ELSER to an {{infer}} pipeline using 1. In {{kib}}, navigate to **Search** > **Indices**. 2. Select the index from the list that has an {{infer}} pipeline in which you want to use ELSER. 3. Navigate to the **Pipelines** tab. -4. Under **{{ml-app}} {infer-cap} Pipelines**, click the **Deploy** button to begin downloading the ELSER model. This may take a few minutes depending on your network. +4. Under **{{ml-app}} {{infer-cap}} Pipelines**, click the **Deploy** button to begin downloading the ELSER model. This may take a few minutes depending on your network. :::{image} ../../../images/machine-learning-ml-nlp-deploy-elser-v2-es.png :alt: Deploying ELSER in Elasticsearch diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-overview.md b/explore-analyze/machine-learning/nlp/ml-nlp-overview.md index 6660b41ef9..0e029e865b 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-overview.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-overview.md @@ -24,9 +24,9 @@ You can **store embeddings in your {{es}} vector database** if you generate [dense ## What is NLP? [what-is-nlp] -Classically, NLP was performed using linguistic rules, dictionaries, regular expressions, and {{ml}} for specific tasks such as automatic categorization or summarization of text. In recent years, however, deep learning techniques have taken over much of the NLP landscape. Deep learning capitalizes on the availability of large scale data sets, cheap computation, and techniques for learning at scale with less human involvement. Pre-trained language models that use a transformer architecture have been particularly successful. For example, BERT is a pre-trained language model that was released by Google in 2018. Since that time, it has become the inspiration for most of today’s modern NLP techniques.
The {{stack}} {ml} features are structured around BERT and transformer models. These features support BERT’s tokenization scheme (called WordPiece) and transformer models that conform to the standard BERT model interface. For the current list of supported architectures, refer to [Compatible third party models](ml-nlp-model-ref.md). +Classically, NLP was performed using linguistic rules, dictionaries, regular expressions, and {{ml}} for specific tasks such as automatic categorization or summarization of text. In recent years, however, deep learning techniques have taken over much of the NLP landscape. Deep learning capitalizes on the availability of large scale data sets, cheap computation, and techniques for learning at scale with less human involvement. Pre-trained language models that use a transformer architecture have been particularly successful. For example, BERT is a pre-trained language model that was released by Google in 2018. Since that time, it has become the inspiration for most of today’s modern NLP techniques. The {{stack}} {{ml}} features are structured around BERT and transformer models. These features support BERT’s tokenization scheme (called WordPiece) and transformer models that conform to the standard BERT model interface. For the current list of supported architectures, refer to [Compatible third party models](ml-nlp-model-ref.md). -To incorporate transformer models and make predictions, {{es}} uses libtorch, which is an underlying native library for PyTorch. Trained models must be in a TorchScript representation for use with {{stack}} {ml} features. +To incorporate transformer models and make predictions, {{es}} uses libtorch, which is an underlying native library for PyTorch. Trained models must be in a TorchScript representation for use with {{stack}} {{ml}} features. As in the cases of [classification](../data-frame-analytics/ml-dfa-classification.md) and [{{regression}}](../data-frame-analytics/ml-dfa-regression.md), after you deploy a model to your cluster, you can use it to make predictions (also known as *{{infer}}*) against incoming data. You can perform the following NLP operations: diff --git a/explore-analyze/machine-learning/setting-up-machine-learning.md b/explore-analyze/machine-learning/setting-up-machine-learning.md index e55b8b34d7..965d8addef 100644 --- a/explore-analyze/machine-learning/setting-up-machine-learning.md +++ b/explore-analyze/machine-learning/setting-up-machine-learning.md @@ -11,7 +11,7 @@ mapped_pages: ## Requirements overview [requirements-overview] -To use the {{stack}} {ml-features}, you must have: +To use the {{stack}} {{ml-features}}, you must have: * the [appropriate subscription](https://www.elastic.co/subscriptions) level or the free trial period activated * `xpack.ml.enabled` set to its default value of `true` on every node in the cluster (refer to [{{ml-cap}} settings in {{es}}](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-settings.html)) diff --git a/explore-analyze/query-filter/tools/console.md b/explore-analyze/query-filter/tools/console.md index 2ec1fe8d1f..8d71601432 100644 --- a/explore-analyze/query-filter/tools/console.md +++ b/explore-analyze/query-filter/tools/console.md @@ -45,7 +45,7 @@ You can also find Console directly on certain Search solution and Elasticsearch ## Write requests [console-api] -**Console** accepts commands in a simplified HTTP request syntax. 
For example, the following `GET` request calls the {es} `_search` API: +**Console** accepts commands in a simplified HTTP request syntax. For example, the following `GET` request calls the {{es}} `_search` API: ```js GET /_search ``` diff --git a/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md b/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md index 1e59ba71b4..3a581aea3d 100644 --- a/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md +++ b/explore-analyze/report-and-share/reporting-troubleshooting-pdf.md @@ -87,7 +87,7 @@ The Puppeteer logs are very verbose and could possibly contain sensitive informa ## System requirements [reporting-troubleshooting-system-requirements] -In Elastic Cloud, the {{kib}} instances that most configurations provide by default is for 1GB of RAM for the instance. That is enough for {{kib}} {report-features} when the visualization or dashboard is relatively simple, such as a single pie chart or a dashboard with a few visualizations. However, certain visualization types incur more load than others. For example, a TSVB panel has a lot of network requests to render. +In Elastic Cloud, most configurations provide {{kib}} instances with 1GB of RAM by default. That is enough for {{kib}} {{report-features}} when the visualization or dashboard is relatively simple, such as a single pie chart or a dashboard with a few visualizations. However, certain visualization types incur more load than others. For example, a TSVB panel requires a lot of network requests to render. If the {{kib}} instance doesn’t have enough memory to run the report, the report fails with an error such as `Error: Page crashed!`. In this case, try increasing the memory for the {{kib}} instance to 2GB. diff --git a/explore-analyze/transforms/ecommerce-transforms.md b/explore-analyze/transforms/ecommerce-transforms.md index a627ca9150..0a47bf94cb 100644 --- a/explore-analyze/transforms/ecommerce-transforms.md +++ b/explore-analyze/transforms/ecommerce-transforms.md @@ -10,7 +10,7 @@ mapped_pages: [{{transforms-cap}}](../transforms.md) enable you to retrieve information from an {{es}} index, transform it, and store it in another index. Let’s use the [{{kib}} sample data](https://www.elastic.co/guide/en/kibana/current/get-started.html) to demonstrate how you can pivot and summarize your data with {{transforms}}. -1. Verify that your environment is set up properly to use {{transforms}}. If the {{es}} {security-features} are enabled, to complete this tutorial you need a user that has authority to preview and create {{transforms}}. You must also have specific index privileges for the source and destination indices. See [Setup](transform-setup.md). +1. Verify that your environment is set up properly to use {{transforms}}. If the {{es}} {{security-features}} are enabled, to complete this tutorial you need a user that has authority to preview and create {{transforms}}. You must also have specific index privileges for the source and destination indices. See [Setup](transform-setup.md). 2. Choose your *source index*. In this example, we’ll use the eCommerce orders sample data. If you’re not already familiar with the `kibana_sample_data_ecommerce` index, use the **Revenue** dashboard in {{kib}} to explore the data. Consider what insights you might want to derive from this eCommerce data.
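As a hedged sketch of what such a pivot can look like before you commit to a full configuration — the group-by field and aggregation here are illustrative choices, not the tutorial's exact settings:

```js
// Preview a pivot: one bucket per customer with their total spend
POST _transform/_preview
{
  "source": { "index": "kibana_sample_data_ecommerce" },
  "pivot": {
    "group_by": {
      "customer_id": { "terms": { "field": "customer_id" } }
    },
    "aggregations": {
      "total_spent": { "sum": { "field": "taxful_total_price" } }
    }
  }
}
```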
diff --git a/explore-analyze/transforms/transform-alerts.md b/explore-analyze/transforms/transform-alerts.md index 7e02fc7561..9a586c5e3f 100644 --- a/explore-analyze/transforms/transform-alerts.md +++ b/explore-analyze/transforms/transform-alerts.md @@ -8,7 +8,7 @@ mapped_pages: # Generating alerts for transforms [transform-alerts] -{{kib}} {alert-features} include support for {{transform}} health rules, which check the health of {{ctransforms}} with certain conditions. If the conditions of the rule are met, an alert is created and the associated actions run. For example, you can create a rule to check if a {{ctransform}} is started and to notify you in an email if it is not. To learn more about {{kib}} {alert-features}, refer to [Alerting](../alerts-cases/alerts/alerting-getting-started.md). +{{kib}} {{alert-features}} include support for {{transform}} health rules, which check the health of {{ctransforms}} with certain conditions. If the conditions of the rule are met, an alert is created and the associated actions run. For example, you can create a rule to check if a {{ctransform}} is started and to notify you in an email if it is not. To learn more about {{kib}} {{alert-features}}, refer to [Alerting](../alerts-cases/alerts/alerting-getting-started.md). ## Creating a rule [creating-transform-rules] diff --git a/explore-analyze/transforms/transform-usage.md b/explore-analyze/transforms/transform-usage.md index 9030290ef8..c2ba2a4305 100644 --- a/explore-analyze/transforms/transform-usage.md +++ b/explore-analyze/transforms/transform-usage.md @@ -16,7 +16,7 @@ You might want to consider using {{transforms}} instead of aggregations when: * You need a complete *feature index* rather than a top-N set of items. - In {{ml}}, you often need a complete set of behavioral features rather just the top-N. For example, if you are predicting customer churn, you might look at features such as the number of website visits in the last week, the total number of sales, or the number of emails sent. The {{stack}} {ml-features} create models based on this multi-dimensional feature space, so they benefit from the full feature indices that are created by {{transforms}}. + In {{ml}}, you often need a complete set of behavioral features rather than just the top-N. For example, if you are predicting customer churn, you might look at features such as the number of website visits in the last week, the total number of sales, or the number of emails sent. The {{stack}} {{ml-features}} create models based on this multi-dimensional feature space, so they benefit from the full feature indices that are created by {{transforms}}. This scenario also applies when you are trying to search across the results of an aggregation or multiple aggregations. Aggregation results can be ordered or filtered, but there are [limitations to ordering](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order) and [filtering by bucket selector](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-selector-aggregation.html) is constrained by the maximum number of buckets returned. If you want to search all aggregation results, you need to create the complete {{dataframe}}. If you need to sort or filter the aggregation results by multiple fields, {{transforms}} are particularly useful.
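To make the multi-field sorting point above concrete, here is a hedged sketch of querying a transform's destination index — the index and field names (`ecommerce-customer-totals`, `total_spent`, `order_count`) are hypothetical:

```js
// Sort and filter the complete feature index on several fields at once
GET ecommerce-customer-totals/_search
{
  "query": { "range": { "total_spent": { "gte": 100 } } },
  "sort": [
    { "total_spent": "desc" },
    { "order_count": "desc" }
  ]
}
```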
diff --git a/explore-analyze/visualize/legacy-editors/tsvb.md b/explore-analyze/visualize/legacy-editors/tsvb.md index 9cfeb701c6..42412bc8a9 100644 --- a/explore-analyze/visualize/legacy-editors/tsvb.md +++ b/explore-analyze/visualize/legacy-editors/tsvb.md @@ -44,7 +44,7 @@ Creating **TSVB** visualizations with an {{es}} index string is deprecated and w 1. On the dashboard, click **Select type**, then select **TSVB**. 2. In **TSVB**, click **Panel options**, then specify the **Data** settings. 3. Open the **Data view mode** options next to the **Data view** dropdown. -4. Select **Use only {{kib}} {data-sources}**. +4. Select **Use only {{kib}} {{data-sources}}**. 5. From the **Data view** dropdown, select the {{data-source}}, then select the **Time field** and **Interval**. 6. Select a **Drop last bucket** option. diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md index 4161ca894b..f7dfd21a30 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md @@ -32,7 +32,7 @@ Info on {{agent}} and agent integrations: Info on {{ls}} and {{ls}} plugins: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {agent} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) +* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) * [{{ls}} Kafka output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md index 8f7292ccc7..c221cd2271 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md @@ -32,7 +32,7 @@ Info on {{agent}} and agent integrations: Info on {{ls}} and {{ls}} Kafka plugins: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {agent} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) +* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) * [{{ls}} Kafka input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html) * [{{ls}} Kafka output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html) * [{{ls}} Elasticsearch output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) diff --git a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md index 05487b8174..d4208cf10c 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md @@ -35,10 +35,10 @@ Info on configuring {{agent}}: For info on {{ls}} for enriching data, check out these sections in the [Logstash Reference](https://www.elastic.co/guide/en/logstash/current): -* [{{ls}} {agent} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) +* [{{ls}} {{agent}} input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html) * [{{ls}} plugins for enriching 
data](https://www.elastic.co/guide/en/logstash/current/lookup-enrichment.html) * [Logstash filter plugins](https://www.elastic.co/guide/en/logstash/current/filter-plugins.html) -* [{{ls}} {es} output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{ls}} {{es}} output](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) Info on {{es}}: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-multi.md b/manage-data/ingest/ingest-reference-architectures/ls-multi.md index d524889a81..e3b92f27a9 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-multi.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-multi.md @@ -62,7 +62,7 @@ Info on configuring {{agent}}: Info on {{ls}} and {{ls}} outputs: * [{{ls}} Reference](https://www.elastic.co/guide/en/logstash/current) -* [{{ls}} {es} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) +* [{{ls}} {{es}} output plugin](https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html) * [{{ls}} output plugins](https://www.elastic.co/guide/en/logstash/current/output-plugins.html) Info on {{es}}: diff --git a/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md b/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md index cda8a9927f..a6ddfec931 100644 --- a/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md +++ b/manage-data/lifecycle/index-lifecycle-management/configure-lifecycle-policy.md @@ -14,7 +14,7 @@ To use a policy to manage an index that doesn’t roll over, you can specify a l {{ilm-init}} policies are stored in the global cluster state and can be included in snapshots by setting `include_global_state` to `true` when you [take the snapshot](../../../deploy-manage/tools/snapshot-and-restore/create-snapshots.md). When the snapshot is restored, all of the policies in the global state are restored and any local policies with the same names are overwritten. ::::{important} -When you enable {{ilm}} for {{beats}} or the {{ls}} {es} output plugin, the necessary policies and configuration changes are applied automatically. You can modify the default policies, but you do not need to explicitly configure a policy or bootstrap an initial index. +When you enable {{ilm}} for {{beats}} or the {{ls}} {{es}} output plugin, the necessary policies and configuration changes are applied automatically. You can modify the default policies, but you do not need to explicitly configure a policy or bootstrap an initial index. :::: @@ -111,7 +111,7 @@ PUT _index_template/my_template When you set up policies for your own rolling indices, if you are not using the recommended [data streams](../../data-store/index-types/data-streams.md), you need to manually create the first index managed by a policy and designate it as the write index. ::::{important} -When you enable {{ilm}} for {{beats}} or the {{ls}} {es} output plugin, the necessary policies and configuration changes are applied automatically. You can modify the default policies, but you do not need to explicitly configure a policy or bootstrap an initial index. +When you enable {{ilm}} for {{beats}} or the {{ls}} {{es}} output plugin, the necessary policies and configuration changes are applied automatically. You can modify the default policies, but you do not need to explicitly configure a policy or bootstrap an initial index. 
:::: diff --git a/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md b/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md index 989e559716..53fa18e9c4 100644 --- a/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md +++ b/manage-data/lifecycle/index-lifecycle-management/index-management-in-kibana.md @@ -12,7 +12,7 @@ To use these features, go to **Stack Management** > **Index Management**. ## Required permissions [index-mgm-req-permissions] -If you use {{es}} {security-features}, the following [security privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md) are required: +If you use {{es}} {{security-features}}, the following [security privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md) are required: * The `monitor` cluster privilege to access {{kib}}'s **Index Management** features. * The `view_index_metadata` and `manage` index privileges to view a data stream or index’s data. diff --git a/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md b/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md index 2b2be1e605..43eef61ab4 100644 --- a/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md +++ b/manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md @@ -26,7 +26,7 @@ To automate rollover and management of a data stream with {{ilm-init}}, you: 3. [Verify indices are moving through the lifecycle phases](../index-lifecycle-management.md#ilm-gs-check-progress) as expected. ::::{important} -When you enable {{ilm}} for {{beats}} or the {{ls}} {es} output plugin, lifecycle policies are set up automatically. You do not need to take any other actions. You can modify the default policies through [{{kib}} Management](tutorial-customize-built-in-policies.md) or the {{ilm-init}} APIs. +When you enable {{ilm}} for {{beats}} or the {{ls}} {{es}} output plugin, lifecycle policies are set up automatically. You do not need to take any other actions. You can modify the default policies through [{{kib}} Management](tutorial-customize-built-in-policies.md) or the {{ilm-init}} APIs. :::: diff --git a/raw-migrated-files/docs-content/serverless/security-configure-endpoint-integration-policy.md b/raw-migrated-files/docs-content/serverless/security-configure-endpoint-integration-policy.md index 038709be5e..8f5ac272e5 100644 --- a/raw-migrated-files/docs-content/serverless/security-configure-endpoint-integration-policy.md +++ b/raw-migrated-files/docs-content/serverless/security-configure-endpoint-integration-policy.md @@ -70,7 +70,7 @@ These additional options are available for malware protection: Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. ::::{tip} -Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {filename}` syntax. +Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {{filename}}` syntax. :::: @@ -125,7 +125,7 @@ When ransomware protection is enabled, canary files placed in targeted locations Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. 
Notifications are enabled by default for the **Prevent** option. ::::{tip} -Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {rule}` syntax. +Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {{rule}}` syntax. :::: @@ -189,7 +189,7 @@ Select whether you want to use **Reputation service** for additional protection. Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. ::::{tip} -Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {rule}` syntax. +Endpoint Protection Complete customers can customize these notifications using the `Elastic Security {{action}} {{rule}}` syntax. :::: diff --git a/raw-migrated-files/docs-content/serverless/security-machine-learning.md b/raw-migrated-files/docs-content/serverless/security-machine-learning.md index e7afdf8e28..c2c892533c 100644 --- a/raw-migrated-files/docs-content/serverless/security-machine-learning.md +++ b/raw-migrated-files/docs-content/serverless/security-machine-learning.md @@ -7,7 +7,7 @@ You can view the details of detected anomalies within the `Anomalies` table widg ## Manage {{ml}} jobs [manage-jobs] -If you have the `machine_learning_admin` role, you can use the **ML job settings** interface on the **Alerts***, ***Rules**, and **Rule Exceptions** pages to view, start, and stop {{elastic-sec}} {ml} jobs. +If you have the `machine_learning_admin` role, you can use the **ML job settings** interface on the **Alerts**, **Rules**, and **Rule Exceptions** pages to view, start, and stop {{elastic-sec}} {{ml}} jobs. :::{image} ../../../images/serverless--detections-machine-learning-ml-ui.png :alt: ML job settings UI on the Alerts page @@ -37,7 +37,7 @@ You can also check the status of {{ml}} detection rules, and start or stop their ### Prebuilt jobs [included-jobs] -{{elastic-sec}} comes with prebuilt {{ml}} {anomaly-jobs} for automatically detecting host and network anomalies. The jobs are displayed in the `Anomaly Detection` interface. They are available when either: +{{elastic-sec}} comes with prebuilt {{ml}} {{anomaly-jobs}} for automatically detecting host and network anomalies. The jobs are displayed in the `Anomaly Detection` interface. They are available when either: * You ship data using [Beats](https://www.elastic.co/products/beats) or the [{{agent}}](../../../solutions/security/configure-elastic-defend/install-elastic-defend.md), and {{kib}} is configured with the required index patterns (such as `auditbeat-*`, `filebeat-*`, `packetbeat-*`, or `winlogbeat-*` in **Project settings** → **Management** → **Index Management**).
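Although these pages manage jobs through the UI, the same prebuilt jobs can also be inspected and started with the {{ml}} APIs — a hedged sketch; the job ID here is hypothetical:

```js
// List all anomaly detection jobs and their current state
GET _ml/anomaly_detectors/_stats

// Open one of the prebuilt security jobs so it can start analyzing data
POST _ml/anomaly_detectors/my_security_job/_open
```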
diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/active-directory-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/active-directory-realm.md index 7709840c5c..15808105a0 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/active-directory-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/active-directory-realm.md @@ -1,6 +1,6 @@ # Active Directory user authentication [active-directory-realm] -You can configure {{stack}} {security-features} to communicate with Active Directory to authenticate users. See [Configuring an Active Directory realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/active-directory.md#ad-realm-configuration). +You can configure {{stack}} {{security-features}} to communicate with Active Directory to authenticate users. See [Configuring an Active Directory realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/active-directory.md#ad-realm-configuration). The {{security-features}} use LDAP to communicate with Active Directory, so `active_directory` realms are similar to [`ldap` realms](../../../deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md). Like LDAP directories, Active Directory stores users and groups hierarchically. The directory’s hierarchy is built from containers such as the *organizational unit* (`ou`), *organization* (`o`), and *domain component* (`dc`). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md index e197284dcf..5885d79490 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/bootstrap-checks-xpack.md @@ -12,7 +12,7 @@ To pass this bootstrap check, you must set the `xpack.watcher.encryption_key` on ## PKI realm check [bootstrap-checks-xpack-pki-realm] -If you use {{es}} {security-features} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more information, see [PKI user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](../../../deploy-manage/security/set-up-basic-security-plus-https.md). +If you use {{es}} {{security-features}} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more information, see [PKI user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md) and [Set up basic security plus HTTPS](../../../deploy-manage/security/set-up-basic-security-plus-https.md). To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. @@ -28,7 +28,7 @@ To pass this bootstrap check, the role mapping files must exist and must be vali ## SSL/TLS check [bootstrap-checks-tls] -If you enable {{es}} {security-features}, unless you have a trial license, you must configure SSL/TLS for internode-communication. +If you enable {{es}} {{security-features}}, unless you have a trial license, you must configure SSL/TLS for internode communication.
::::{note} Single-node clusters that use a loopback interface do not have this requirement. For more information, see [*Start the {{stack}} with security enabled automatically*](../../../deploy-manage/security/security-certificates-keys.md). @@ -40,7 +40,7 @@ To pass this bootstrap check, you must [set up SSL/TLS in your cluster](../../.. ## Token SSL check [bootstrap-checks-xpack-token-ssl] -If you use {{es}} {security-features} and the built-in token service is enabled, you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required in order to use the token service. +If you use {{es}} {{security-features}} and the built-in token service is enabled, you must configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required in order to use the token service. In particular, if `xpack.security.authc.token.enabled` is set to `true` in the `elasticsearch.yml` file, you must also set `xpack.security.http.ssl.enabled` to `true`. For more information about these settings, see [Security settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html) and [Advanced HTTP settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#http-settings). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/built-in-roles.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/built-in-roles.md index 84bacdd4d4..d96a6ca266 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/built-in-roles.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/built-in-roles.md @@ -26,7 +26,7 @@ $$$built-in-roles-editor$$$ `editor` ::::{note} * This role provides read access to any index that is not prefixed with a dot. * This role automatically grants full access to new {{kib}} features as soon as they are released. - * Some {{kib}} features may also require creation or write access to data indices. {{ml-cap}} {dfanalytics-jobs} is an example. For such features those privileges must be defined in a separate role. + * Some {{kib}} features may also require creation or write access to data indices. {{ml-cap}} {{dfanalytics-jobs}} is an example. For such features those privileges must be defined in a separate role. :::: diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/defining-roles.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/defining-roles.md index ca42b0b463..bb0dfeb5b9 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/defining-roles.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/defining-roles.md @@ -113,7 +113,7 @@ The following describes the structure of an application privileges entry: 1. The name of the application. 2. The list of the names of the application privileges to grant to this role. -3. The resources to which those privileges apply. These are handled in the same way as index name pattern in `indices` permissions. These resources do not have any special meaning to the {{es}} {security-features}. +3. The resources to which those privileges apply. These are handled in the same way as index name pattern in `indices` permissions. These resources do not have any special meaning to the {{es}} {{security-features}}. For details about the validation rules for these fields, see the [add application privileges API](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html). 
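For illustration, a minimal sketch of one such application privileges entry as it could appear in a role definition; the application, privilege, and resource names below are hypothetical, and the three fields correspond to the numbered items above:

```json
{
  "applications": [
    {
      "application": "myapp",
      "privileges": [ "admin", "read" ],
      "resources": [ "data/*" ]
    }
  ]
}
```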
diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/file-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/file-realm.md index 2c0b27ee9e..28c497dbed 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/file-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/file-realm.md @@ -3,7 +3,7 @@ You can manage and authenticate users with the built-in `file` realm. With the `file` realm, users are defined in local files on each node in the cluster. ::::{important} -As the administrator of the cluster, it is your responsibility to ensure the same users are defined on every node in the cluster. The {{stack}} {security-features} do not deliver any mechanism to guarantee this. You should also be aware that you cannot add or manage users in the `file` realm via the [user APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api.html#security-user-apis) and you cannot add or manage them in {{kib}} on the **Management / Security / Users** page +As the administrator of the cluster, it is your responsibility to ensure the same users are defined on every node in the cluster. The {{stack}} {{security-features}} do not deliver any mechanism to guarantee this. You should also be aware that you cannot add or manage users in the `file` realm via the [user APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api.html#security-user-apis) and you cannot add or manage them in {{kib}} on the **Management / Security / Users** page. :::: @@ -67,7 +67,7 @@ A safer approach would be to apply the change on one of the nodes and have the f While it is possible to modify the `users` files directly using any standard text editor, we strongly recommend using the [*elasticsearch-users*](https://www.elastic.co/guide/en/elasticsearch/reference/current/users-command.html) tool to apply the required changes. ::::{important} - As the administrator of the cluster, it is your responsibility to ensure the same users are defined on every node in the cluster. The {{es}} {security-features} do not deliver any mechanisms to guarantee this. + As the administrator of the cluster, it is your responsibility to ensure the same users are defined on every node in the cluster. The {{es}} {{security-features}} do not deliver any mechanisms to guarantee this. :::: 4. Add role information to the `ES_PATH_CONF/users_roles` file on each node in the cluster. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/index-lifecycle-management.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/index-lifecycle-management.md index 77e3ff5c8f..c647e90fc0 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/index-lifecycle-management.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/index-lifecycle-management.md @@ -6,7 +6,7 @@ You can configure {{ilm}} ({{ilm-init}}) policies to automatically manage indice * Create a new index each day, week, or month and archive previous ones * Delete stale indices to enforce data retention standards -You can create and manage index lifecycle policies through {{kib}} Management or the {{ilm-init}} APIs. Default {{ilm}} policies are created automatically when you use {{agent}}, {{beats}}, or the {{ls}} {es} output plugin to send data to the {{stack}}. +You can create and manage index lifecycle policies through {{kib}} Management or the {{ilm-init}} APIs.
Default {{ilm}} policies are created automatically when you use {{agent}}, {{beats}}, or the {{ls}} {{es}} output plugin to send data to the {{stack}}. ![index lifecycle policies](../../../images/elasticsearch-reference-index-lifecycle-policies.png "") diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/index-mgmt.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/index-mgmt.md index 78c06fccf2..1587e6a498 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/index-mgmt.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/index-mgmt.md @@ -7,7 +7,7 @@ To use these features, go to **Stack Management** > **Index Management**. ## Required permissions [index-mgm-req-permissions] -If you use {{es}} {security-features}, the following [security privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md) are required: +If you use {{es}} {{security-features}}, the following [security privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md) are required: * The `monitor` cluster privilege to access {{kib}}'s **Index Management** features. * The `view_index_metadata` and `manage` index privileges to view a data stream or index’s data. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/ip-filtering.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/ip-filtering.md index 62c7ec7dde..f8f8fc6baf 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/ip-filtering.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/ip-filtering.md @@ -2,17 +2,17 @@ You can apply IP filtering to application clients, node clients, or transport clients, remote cluster clients, in addition to other nodes that are attempting to join the cluster. -If a node’s IP address is on the denylist, the {{es}} {security-features} allow the connection to {{es}} but it is be dropped immediately and no requests are processed. +If a node’s IP address is on the denylist, the {{es}} {{security-features}} allow the connection to {{es}} but it is dropped immediately and no requests are processed. ::::{note} -Elasticsearch installations are not designed to be publicly accessible over the Internet. IP Filtering and the other capabilities of the {{es}} {security-features} do not change this condition. +Elasticsearch installations are not designed to be publicly accessible over the Internet. IP Filtering and the other capabilities of the {{es}} {{security-features}} do not change this condition. :::: ## Enabling IP filtering [_enabling_ip_filtering] -The {{es}} {security-features} contain an access control feature that allows or rejects hosts, domains, or subnets. If the [{{operator-feature}}](../../../deploy-manage/users-roles/cluster-or-deployment-auth/operator-privileges.md) is enabled, only operator users can update these settings. +The {{es}} {{security-features}} contain an access control feature that allows or rejects hosts, domains, or subnets. If the [{{operator-feature}}](../../../deploy-manage/users-roles/cluster-or-deployment-auth/operator-privileges.md) is enabled, only operator users can update these settings. You configure IP filtering by specifying the `xpack.security.transport.filter.allow` and `xpack.security.transport.filter.deny` settings in `elasticsearch.yml`. Allow rules take precedence over the deny rules.
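As a minimal `elasticsearch.yml` sketch of these two settings (the addresses are placeholders, not recommendations):

```yaml
# Allow one trusted host while denying the rest of its subnet.
# Because allow rules take precedence, 192.168.0.1 can still connect.
xpack.security.transport.filter.allow: "192.168.0.1"
xpack.security.transport.filter.deny: "192.168.0.0/24"
```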
@@ -66,7 +66,7 @@ xpack.security.http.filter.enabled: true ## Specifying TCP transport profiles [_specifying_tcp_transport_profiles] -[TCP transport profiles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-profiles) enable Elasticsearch to bind on multiple hosts. The {{es}} {security-features} enable you to apply different IP filtering on different profiles. +[TCP transport profiles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#transport-profiles) enable Elasticsearch to bind on multiple hosts. The {{es}} {{security-features}} enable you to apply different IP filtering on different profiles. ```yaml xpack.security.transport.filter.allow: 172.16.0.0/24 diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md index ae2f4671c0..a6793106e7 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md @@ -1,6 +1,6 @@ # Kerberos authentication [kerberos-realm] -You can configure the {{stack}} {security-features} to support Kerberos V5 authentication, an industry standard protocol to authenticate users in {{es}}. +You can configure the {{stack}} {{security-features}} to support Kerberos V5 authentication, an industry standard protocol to authenticate users in {{es}}. ::::{note} You cannot use the Kerberos realm to authenticate on the transport network layer. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/ldap-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/ldap-realm.md index 1494bc6453..318166c697 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/ldap-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/ldap-realm.md @@ -1,6 +1,6 @@ # LDAP user authentication [ldap-realm] -You can configure the {{stack}} {security-features} to communicate with a Lightweight Directory Access Protocol (LDAP) server to authenticate users. See [Configuring an LDAP realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md#ldap-realm-configuration). +You can configure the {{stack}} {{security-features}} to communicate with a Lightweight Directory Access Protocol (LDAP) server to authenticate users. See [Configuring an LDAP realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md#ldap-realm-configuration). LDAP stores users and groups hierarchically, similar to the way folders are grouped in a file system. An LDAP directory’s hierarchy is built from containers such as the *organizational unit* (`ou`), *organization* (`o`), and *domain component* (`dc`). @@ -95,7 +95,7 @@ To integrate with LDAP, you configure an `ldap` realm and map LDAP groups to use 4. (Optional) Configure how the {{security-features}} interact with multiple LDAP servers. - The `load_balance.type` setting can be used at the realm level. The {{es}} {security-features} support both failover and load balancing modes of operation. See [LDAP realm settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-ldap-settings). + The `load_balance.type` setting can be used at the realm level. The {{es}} {{security-features}} support both failover and load balancing modes of operation. 
See [LDAP realm settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-ldap-settings). 5. (Optional) To protect passwords, [encrypt communications between {{es}} and the LDAP server](../../../deploy-manage/users-roles/cluster-or-deployment-auth/ldap.md#tls-ldap). 6. Restart {{es}}. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/mapping-roles.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/mapping-roles.md index 7dbd3afde8..1cf2247bed 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/mapping-roles.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/mapping-roles.md @@ -17,7 +17,7 @@ When [anonymous access](../../../deploy-manage/users-roles/cluster-or-deployment :::: -If there are role-mapping rules created through the API as well as a role mapping file, the rules are combined. It’s possible for a single user to have some roles that were mapped through the API, and others assigned based on the role mapping file. You can define role-mappings via an [API](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-api) or manage them through [files](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). These two sources of role-mapping are combined inside of the {{es}} {security-features}, so it is possible for a single user to have some roles that have been mapped through the API, and other roles that are mapped through files. +If there are role-mapping rules created through the API as well as a role mapping file, the rules are combined. It’s possible for a single user to have some roles that were mapped through the API, and others assigned based on the role mapping file. You can define role-mappings via an [API](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-api) or manage them through [files](../../../deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles.md#mapping-roles-file). ::::{note} Users with no roles assigned will be unauthorized for any action. In other words, they may be able to authenticate, but they will have no roles. No roles means no privileges, and no privileges means no authorizations to make requests. @@ -62,7 +62,7 @@ You cannot view, edit, or remove any roles that are defined in the role mapping To specify users and groups in the role mappings, you use their *Distinguished Names* (DNs). A DN is a string that uniquely identifies the user or group, for example `"cn=John Doe,cn=contractors,dc=example,dc=com"`. ::::{note} -The {{es}} {security-features} support only Active Directory security groups. You cannot map distribution groups to roles. +The {{es}} {{security-features}} support only Active Directory security groups. You cannot map distribution groups to roles.
:::: diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/monitor-elasticsearch-cluster.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/monitor-elasticsearch-cluster.md index 55247fd47c..14f8da34f2 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/monitor-elasticsearch-cluster.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/monitor-elasticsearch-cluster.md @@ -1,6 +1,6 @@ # Monitor a cluster [monitor-elasticsearch-cluster] -The {{stack}} {monitor-features} provide a way to keep a pulse on the health and performance of your {{es}} cluster. +The {{stack}} {{monitor-features}} provide a way to keep a pulse on the health and performance of your {{es}} cluster. * [Overview](../../../deploy-manage/monitor/stack-monitoring.md) * [How it works](../../../deploy-manage/monitor/stack-monitoring.md) diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/monitoring-production.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/monitoring-production.md index 32129be86a..4b964e862e 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/monitoring-production.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/monitoring-production.md @@ -38,7 +38,7 @@ To store monitoring data in a separate cluster: } ``` - 2. If the {{es}} {security-features} are enabled on the monitoring cluster, create users that can send and retrieve monitoring data: + 2. If the {{es}} {{security-features}} are enabled on the monitoring cluster, create users that can send and retrieve monitoring data: ::::{note} If you plan to use {{kib}} to view monitoring data, username and password credentials must be valid on both the {{kib}} server and the monitoring cluster. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/native-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/native-realm.md index 6010f7080e..3c829e75fc 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/native-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/native-realm.md @@ -36,7 +36,7 @@ You can configure a `native` realm in the `xpack.security.authc.realms.native` n ## Managing native users [managing-native-users] -The {{stack}} {security-features} enable you to easily manage users in {{kib}} on the **Management / Security / Users** page. +The {{stack}} {{security-features}} enable you to easily manage users in {{kib}} on the **Management / Security / Users** page. Alternatively, you can manage users through the `user` API. For more information and examples, see [Users](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api.html#security-user-apis). diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/oidc-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/oidc-realm.md index a6a9a5d213..276bc22f77 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/oidc-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/oidc-realm.md @@ -2,7 +2,7 @@ The OpenID Connect realm enables {{es}} to serve as an OpenID Connect Relying Party (RP) and provides single sign-on (SSO) support in {{kib}}. -It is specifically designed to support authentication via an interactive web browser, so it does not operate as a standard authentication realm. Instead, there are {{kib}} and {{es}} {security-features} that work together to enable interactive OpenID Connect sessions. 
+It is specifically designed to support authentication via an interactive web browser, so it does not operate as a standard authentication realm. Instead, there are {{kib}} and {{es}} {{security-features}} that work together to enable interactive OpenID Connect sessions. This means that the OpenID Connect realm is not suitable for use by standard REST clients. If you configure an OpenID Connect realm for use in {{kib}}, you should also configure another realm, such as the [native realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/native.md) in your authentication chain. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/role-mapping-resources.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/role-mapping-resources.md index 95d36afa1d..97c61da47b 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/role-mapping-resources.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/role-mapping-resources.md @@ -49,7 +49,7 @@ The value specified in the field rule can be one of the following types: The *user object* against which rules are evaluated has the following fields: `username` -: (string) The username by which the {{es}} {security-features} knows this user. For example, `"username": "jsmith"`. +: (string) The username by which the {{es}} {{security-features}} knows this user. For example, `"username": "jsmith"`. `dn` : (string) The *Distinguished Name* of the user. For example, `"dn": "cn=jsmith,ou=users,dc=example,dc=com",`. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md index bd435624f0..5466a25ddc 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md @@ -365,7 +365,7 @@ If you wish to sign some, but not all outgoing **SAML messages**, then you shoul #### Configuring {{es}} for encrypted messages [_configuring_es_for_encrypted_messages] -The {{es}} {security-features} support a single key for message decryption. If a key is configured, then {{es}} attempts to use it to decrypt `EncryptedAssertion` and `EncryptedAttribute` elements in Authentication responses, and `EncryptedID` elements in Logout requests. +The {{es}} {{security-features}} support a single key for message decryption. If a key is configured, then {{es}} attempts to use it to decrypt `EncryptedAssertion` and `EncryptedAttribute` elements in Authentication responses, and `EncryptedID` elements in Logout requests. {{es}} rejects any SAML message that contains an `EncryptedAssertion` that cannot be decrypted. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-realm.md index dca144d949..aa83a95435 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-realm.md @@ -1,8 +1,8 @@ # SAML authentication [saml-realm] -The {{stack}} {security-features} support user authentication using SAML single sign-on (SSO). The {{security-features}} provide this support using the Web Browser SSO profile of the SAML 2.0 protocol. +The {{stack}} {{security-features}} support user authentication using SAML single sign-on (SSO). The {{security-features}} provide this support using the Web Browser SSO profile of the SAML 2.0 protocol. 
-This protocol is specifically designed to support authentication via an interactive web browser, so it does not operate as a standard authentication realm. Instead, there are {{kib}} and {{es}} {security-features} that work together to enable interactive SAML sessions. +This protocol is specifically designed to support authentication via an interactive web browser, so it does not operate as a standard authentication realm. Instead, there are {{kib}} and {{es}} {{security-features}} that work together to enable interactive SAML sessions. This means that the SAML realm is not suitable for use by standard REST clients. If you configure a SAML realm for use in {{kib}}, you should also configure another realm, such as the [native realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/native.md) in your authentication chain. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-monitoring.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-monitoring.md index f86a923624..93c5961f1f 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-monitoring.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/secure-monitoring.md @@ -1,6 +1,6 @@ # Monitoring and security [secure-monitoring] -The {{stack}} {monitor-features} consist of two components: an agent that you install on each {{es}} and Logstash node, and a Monitoring UI in {{kib}}. The monitoring agent collects and indexes metrics from the nodes and you visualize the data through the Monitoring dashboards in {{kib}}. The agent can index data on the same {{es}} cluster, or send it to an external monitoring cluster. +The {{stack}} {{monitor-features}} consist of two components: an agent that you install on each {{es}} and Logstash node, and a Monitoring UI in {{kib}}. The monitoring agent collects and indexes metrics from the nodes and you visualize the data through the Monitoring dashboards in {{kib}}. The agent can index data on the same {{es}} cluster, or send it to an external monitoring cluster. To use the {{monitor-features}} with the {{security-features}} enabled, you need to [set up {{kib}} to work with the {{security-features}}](../../../deploy-manage/security.md) and create at least one user for the Monitoring UI. If you are using an external monitoring cluster, you also need to configure a user for the monitoring agent and configure the agent to use the appropriate credentials when communicating with the monitoring cluster. diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-files.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-files.md index 42d17f014f..b175b11e12 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-files.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-files.md @@ -1,6 +1,6 @@ # Security files [security-files] -The {{es}} {security-features} use the following files: +The {{es}} {{security-features}} use the following files: * `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster. See [Defining roles](../../../deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles.md). * `ES_PATH_CONF/elasticsearch-users` defines the users and their hashed passwords for the `file` realm. See [File-based user authentication](../../../deploy-manage/users-roles/cluster-or-deployment-auth/file-based.md). 
diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-rollup.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-rollup.md index 2b1172d2bb..9e75fc69c4 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-rollup.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/xpack-rollup.md @@ -9,7 +9,7 @@ Rollups will be removed in a future version. Please [migrate](../../../manage-da Keeping historical data around for analysis is extremely useful but often avoided due to the financial cost of archiving massive amounts of data. Retention periods are thus driven by financial realities rather than by the usefulness of extensive historical data. -The {{stack}} {rollup-features} provide a means to summarize and store historical data so that it can still be used for analysis, but at a fraction of the storage cost of raw data. +The {{stack}} {{rollup-features}} provide a means to summarize and store historical data so that it can still be used for analysis, but at a fraction of the storage cost of raw data. * [Overview](../../../manage-data/lifecycle/rollup.md) * [Getting started](../../../manage-data/lifecycle/rollup/getting-started-with-rollups.md) diff --git a/raw-migrated-files/ingest-docs/fleet/fleet-agent-serverless-restrictions.md b/raw-migrated-files/ingest-docs/fleet/fleet-agent-serverless-restrictions.md index 644e9bf7d2..eaf94d4ced 100644 --- a/raw-migrated-files/ingest-docs/fleet/fleet-agent-serverless-restrictions.md +++ b/raw-migrated-files/ingest-docs/fleet/fleet-agent-serverless-restrictions.md @@ -24,7 +24,7 @@ $$$outputs-serverless-restrictions$$$ The path to get to the {{fleet}} application in {{kib}} differs across projects: * In {{ess}} deployments, navigate to **Management > Fleet**. -* In {{serverless-short}} {observability} projects, navigate to **Project settings > Fleet**. +* In {{serverless-short}} {{observability}} projects, navigate to **Project settings > Fleet**. * In {{serverless-short}} Security projects, navigate to **Assets > Fleet**. @@ -32,5 +32,5 @@ The path to get to the {{fleet}} application in {{kib}} differs across projects: Note the following restrictions with using {{fleet-server}} on {{serverless-short}}: -* On-premises {{fleet-server}} is not currently available for use in a {{serverless-short}} environment. We recommend using the hosted {{fleet-server}} that is included and configured automatically in {{serverless-short}} {observability} and Security projects. +* On-premises {{fleet-server}} is not currently available for use in a {{serverless-short}} environment. We recommend using the hosted {{fleet-server}} that is included and configured automatically in {{serverless-short}} {{observability}} and Security projects. * On {{serverless-short}}, you can configure {{fleet-server}} to use a proxy, with the restriction that the {{fleet-server}} host URL is fixed. Any new {{fleet-server}} hosts must use the default {{fleet-server}} host URL. 
diff --git a/raw-migrated-files/kibana/kibana/Security-production-considerations.md b/raw-migrated-files/kibana/kibana/Security-production-considerations.md index 4cfde38e71..d6c0d3c9f2 100644 --- a/raw-migrated-files/kibana/kibana/Security-production-considerations.md +++ b/raw-migrated-files/kibana/kibana/Security-production-considerations.md @@ -15,9 +15,9 @@ You should use SSL/TLS encryption to ensure that traffic between browsers and th encrypt-kibana-http -## Use {{stack}} {security-features} [configuring-kibana-shield] +## Use {{stack}} {{security-features}} [configuring-kibana-shield] -You can use {{stack}} {security-features} to control what {{es}} data users can access through {{kib}}. +You can use {{stack}} {{security-features}} to control what {{es}} data users can access through {{kib}}. When {{security-features}} are enabled, {{kib}} users have to log in. They must have a role granting [{{kib}} privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md) and access to the indices that they will be working with in {{kib}}. diff --git a/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md b/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md index 234b336438..08dbb19b67 100644 --- a/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md +++ b/raw-migrated-files/kibana/kibana/elasticsearch-mutual-tls.md @@ -11,7 +11,7 @@ TLS requires X.509 certificates to authenticate the communicating parties and pe In a standard TLS configuration, the server presents a signed certificate to authenticate itself to the client. In a mutual TLS configuration, the client also presents a signed certificate to authenticate itself to the server. -{{es}} {security-features} are enabled on your cluster by default, so each request that {{kib}} (the client) makes to {{es}} (the server) is authenticated. Most requests made by end users through {{kib}} to {{es}} are authenticated by using the credentials of the logged-in user. +{{es}} {{security-features}} are enabled on your cluster by default, so each request that {{kib}} (the client) makes to {{es}} (the server) is authenticated. Most requests made by end users through {{kib}} to {{es}} are authenticated by using the credentials of the logged-in user. To [enroll {{kib}} with an {{es}} cluster](../../../deploy-manage/security/security-certificates-keys.md#stack-start-with-security), you pass a generated enrollment token. This token configures {{kib}} to authenticate with {{es}} using a [service account token](../../../deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts.md#service-accounts-tokens). {{kib}} also supports mutual TLS authentication with {{es}} via a [Public Key Infrastructure (PKI) realm](../../../deploy-manage/users-roles/cluster-or-deployment-auth/pki.md). With this setup, {{es}} needs to verify the signature on the {{kib}} client certificate, and it also needs to map the client certificate’s distinguished name (DN) to the appropriate `kibana_system` role. diff --git a/raw-migrated-files/kibana/kibana/xpack-security.md b/raw-migrated-files/kibana/kibana/xpack-security.md index 95cd75f2d3..0f05e18821 100644 --- a/raw-migrated-files/kibana/kibana/xpack-security.md +++ b/raw-migrated-files/kibana/kibana/xpack-security.md @@ -1,6 +1,6 @@ # Security [xpack-security] -The {{stack}} {security-features} enable you to easily secure a cluster. 
With security, you can password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see [Secure a cluster](../../../deploy-manage/security.md) and [Configuring Security in {{kib}}](../../../deploy-manage/security.md). +The {{stack}} {{security-features}} enable you to easily secure a cluster. With security, you can password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see [Secure a cluster](../../../deploy-manage/security.md) and [Configuring Security in {{kib}}](../../../deploy-manage/security.md). ::::{note} There are security limitations that affect {{kib}}. For more information, refer to [Security](../../../deploy-manage/security.md). diff --git a/raw-migrated-files/observability-docs/observability/apm-open-telemetry-direct.md b/raw-migrated-files/observability-docs/observability/apm-open-telemetry-direct.md index 7cfb994a70..1f909ffa64 100644 --- a/raw-migrated-files/observability-docs/observability/apm-open-telemetry-direct.md +++ b/raw-migrated-files/observability-docs/observability/apm-open-telemetry-direct.md @@ -73,7 +73,7 @@ service: You’re now ready to export traces and metrics from your services and applications. ::::{tip} -When using the OpenTelemetry Collector, you should always prefer sending data via the [`OTLP` exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) to an Elastic APM Server. Other methods, like using the [`elasticsearch` exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) to send data directly to {{es}} will send data to the {{stack}}, but will bypass all of the validation and data processing that the APM Server performs. In addition, your data will not be viewable in the {{kib}} {observability} apps if you use the `elasticsearch` exporter. +When using the OpenTelemetry Collector, you should always prefer sending data via the [`OTLP` exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter) to an Elastic APM Server. Other methods, like using the [`elasticsearch` exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter) to send data directly to {{es}} will send data to the {{stack}}, but will bypass all of the validation and data processing that the APM Server performs. In addition, your data will not be viewable in the {{kib}} {{observability}} apps if you use the `elasticsearch` exporter. :::: diff --git a/raw-migrated-files/security-docs/security/configure-endpoint-integration-policy.md b/raw-migrated-files/security-docs/security/configure-endpoint-integration-policy.md index 6674df299d..756c2e9ed9 100644 --- a/raw-migrated-files/security-docs/security/configure-endpoint-integration-policy.md +++ b/raw-migrated-files/security-docs/security/configure-endpoint-integration-policy.md @@ -64,7 +64,7 @@ These additional options are available for malware protection: Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. 
::::{tip} -Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {filename}` syntax. +Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {{filename}}` syntax. :::: @@ -111,7 +111,7 @@ When ransomware protection is enabled, canary files placed in targeted locations Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. ::::{tip} -Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {filename}` syntax. +Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {{filename}}` syntax. :::: @@ -135,7 +135,7 @@ Memory threat protection levels are: Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. ::::{tip} -Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {rule}` syntax. +Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {{rule}}` syntax. :::: @@ -166,7 +166,7 @@ Reputation service requires an active [Platinum or Enterprise subscription](http Select **Notify user** to send a push notification in the host operating system when activity is detected or prevented. Notifications are enabled by default for the **Prevent** option. ::::{tip} -Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {rule}` syntax. +Platinum and Enterprise customers can customize these notifications using the `Elastic Security {{action}} {{rule}}` syntax. :::: diff --git a/raw-migrated-files/security-docs/security/data-views-in-sec.md b/raw-migrated-files/security-docs/security/data-views-in-sec.md index fea5838273..7d4d969e1c 100644 --- a/raw-migrated-files/security-docs/security/data-views-in-sec.md +++ b/raw-migrated-files/security-docs/security/data-views-in-sec.md @@ -21,7 +21,7 @@ You can tell which {{data-source}} is active by clicking the **{{data-source-cap To learn how to modify the default **Security Default Data View**, refer to [Update default {{elastic-sec}} indices](../../../solutions/security/get-started/configure-advanced-settings.md#update-sec-indices). -To learn how to modify, create, or delete another {{data-source}} refer to [{{kib}} {data-sources-cap}](../../../explore-analyze/find-and-organize/data-views.md). +To learn how to modify, create, or delete another {{data-source}} refer to [{{kib}} {{data-sources-cap}}](../../../explore-analyze/find-and-organize/data-views.md). You can also temporarily modify the active {{data-source}} from the **{{data-source-cap}}** menu by clicking **Advanced options**, then adding or removing index patterns. 
diff --git a/raw-migrated-files/security-docs/security/machine-learning.md b/raw-migrated-files/security-docs/security/machine-learning.md index 5f5dc63415..51a0533d3f 100644 --- a/raw-migrated-files/security-docs/security/machine-learning.md +++ b/raw-migrated-files/security-docs/security/machine-learning.md @@ -7,7 +7,7 @@ You can view the details of detected anomalies within the `Anomalies` table widg ## Manage {{ml}} jobs [manage-jobs] -If you have the `machine_learning_admin` role, you can use the **ML job settings** interface on the **Alerts**, **Rules**, and **Rule Exceptions** pages to view, start, and stop {{elastic-sec}} {ml} jobs. +If you have the `machine_learning_admin` role, you can use the **ML job settings** interface on the **Alerts**, **Rules**, and **Rule Exceptions** pages to view, start, and stop {{elastic-sec}} {{ml}} jobs. :::{image} ../../../images/security-ml-ui.png :alt: ML job settings UI on the Alerts page @@ -37,7 +37,7 @@ You can also check the status of {{ml}} detection rules, and start or stop their ### Prebuilt jobs [included-jobs] -{{elastic-sec}} comes with prebuilt {{ml}} {anomaly-jobs} for automatically detecting host and network anomalies. The jobs are displayed in the `Anomaly Detection` interface. They are available when either: +{{elastic-sec}} comes with prebuilt {{ml}} {{anomaly-jobs}} for automatically detecting host and network anomalies. The jobs are displayed in the `Anomaly Detection` interface. They are available when either: * You ship data using [Beats](https://www.elastic.co/products/beats) or the [{{agent}}](../../../solutions/security/configure-elastic-defend/install-elastic-defend.md), and {{kib}} is configured with the required index patterns (such as `auditbeat-*`, `filebeat-*`, `packetbeat-*`, or `winlogbeat-*`) on the **Data Views** page. To find this page, navigate to **Data Views** in the navigation menu or by using the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). diff --git a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md b/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md index d9ba38c7c3..35c4423798 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md +++ b/raw-migrated-files/stack-docs/elastic-stack/air-gapped-install.md @@ -40,7 +40,7 @@ Some components of the {{stack}} require additional configuration and local depe * [D.2.1. Using the {{kib}} UI](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-kibana) * [D.2.2. Using the `kibana.yml` config file](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-yml) - * [D.2.3. Using the {{kib}} {fleet} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) + * [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) ::::{note} @@ -92,7 +92,7 @@ Elastic {{beats}} are light-weight data shippers. They do not require any unique Air-gapped install of {{agent}} depends on the [{{package-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-package-registry) and the [{{artifact-registry}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-artifact-registry) for most use-cases. 
The agent itself is fairly lightweight and installs dependencies only as required by its configuration. In terms of connections to these dependencies, {{agents}} need to be able to connect to the {{artifact-registry}} directly, but {{package-registry}} connections are handled through [{{kib}}](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-kibana). -Additionally, if the {{agent}} {elastic-defend} integration is used, then access to the [Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository) is necessary in order to deploy updates for some of the detection and prevention capabilities. +Additionally, if the {{agent}} {{elastic-defend}} integration is used, then access to the [Elastic Endpoint Artifact Repository](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-elastic-endpoint-artifact-repository) is necessary in order to deploy updates for some of the detection and prevention capabilities. To learn more about install and configuration, refer to the [{{agent}} install documentation](https://www.elastic.co/guide/en/fleet/current/elastic-agent-installation.html). Make sure to check the requirements specific to running {{agents}} in an [air-gapped environment](https://www.elastic.co/guide/en/fleet/current/air-gapped.html). @@ -480,7 +480,7 @@ There are three ways to configure {{agent}} integrations: * [D.2.1. Using the {{kib}} UI](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-kibana) * [D.2.2. Using the `kibana.yml` config file](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-yml) -* [D.2.3. Using the {{kib}} {fleet} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) +* [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api) #### D.2.1. Using the {{kib}} UI [air-gapped-agent-integration-configure-kibana] @@ -514,7 +514,7 @@ You can have {{kib}} create {{agent}} policies on your behalf by adding appropri : Takes a list of all integration package names and versions that {{kib}} should download from the {{package-registry}} (EPR). This is done because {{agents}} themselves do not directly fetch packages from the EPR. `xpack.fleet.agentPolicies` -: Takes a list of {{agent}} policies in the format expected by the [{{kib}} {fleet} HTTP API](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html). Refer to the setting in [Preconfiguration settings](https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html#_preconfiguration_settings_for_advanced_use_cases) for the format. See also [D.2.3. Using the {{kib}} {fleet} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api). +: Takes a list of {{agent}} policies in the format expected by the [{{kib}} {{fleet}} HTTP API](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html). Refer to the setting in [Preconfiguration settings](https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html#_preconfiguration_settings_for_advanced_use_cases) for the format. See also [D.2.3. Using the {{kib}} {{fleet}} API](../../../deploy-manage/deploy/self-managed/air-gapped-install.md#air-gapped-agent-integration-configure-fleet-api). 
`xpack.fleet.registryUrl` : Takes a URL of the {{package-registry}} that can be reached by the {{kib}} server. Enable this setting only when deploying in an air-gapped environment. @@ -523,17 +523,17 @@ Other settings : You can add other, more discretionary settings for {{fleet}}, {{agents}}, & policies. Refer to [Fleet settings in {{kib}}](https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html). -#### D.2.3. Using the {{kib}} {fleet} API [air-gapped-agent-integration-configure-fleet-api] +#### D.2.3. Using the {{kib}} {{fleet}} API [air-gapped-agent-integration-configure-fleet-api] **Best option for**: Declarative configuration and users who need reproducible and automated deployments in even the trickiest of environments. **Example:** See the following. -It is possible to use custom scripts that call the {{kib}} {fleet} API to create or update policies without restarting {{kib}}, and also allowing for custom error handling and update logic. +It is possible to use custom scripts that call the {{kib}} {{fleet}} API to create or update policies without restarting {{kib}}, while also allowing for custom error handling and update logic. -At this time, you can refer to the the [{{kib}} {fleet} HTTP API](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html) documentation, however additional resources from public code repositories should be consulted to capture the full set of configuration options available for a given integration. Specifically, many integrations have configuration options such as `inputs` and `data_streams` that are unique. +At this time, you can refer to the [{{kib}} {{fleet}} HTTP API](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html) documentation; however, additional resources from public code repositories should be consulted to capture the full set of configuration options available for a given integration. Specifically, many integrations have configuration options such as `inputs` and `data_streams` that are unique. -In particular, the `*.yml.hbs` templates should be consulted to determine which `vars` are available for configuring a particular integration using the {{kib}} {fleet} API. +In particular, the `*.yml.hbs` templates should be consulted to determine which `vars` are available for configuring a particular integration using the {{kib}} {{fleet}} API. * For most Integrations, refer to the README and `*.yml.hbs` files in the appropriate directory in the [elastic/integrations repository](https://github.com/elastic/integrations/tree/main/packages). * For the APM integration, refer to the README and `*.yml.hbs` files in the [elastic/apm-server repository](https://github.com/elastic/apm-server/tree/main/apmpackage/apm/agent).
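As a rough sketch of how the D.2.2 `kibana.yml` settings described above fit together (the registry URL, package choice, and policy names are placeholders, not defaults):

```yaml
# Point Kibana at the local package registry (air-gapped only).
xpack.fleet.registryUrl: "https://epr.example.internal:8443"
# Packages Kibana should fetch from the registry on behalf of agents.
xpack.fleet.packages:
  - name: system
    version: latest
# Preconfigured agent policy in the format expected by the Fleet API.
xpack.fleet.agentPolicies:
  - name: Air-gapped agent policy
    id: air-gapped-agent-policy
    namespace: default
    package_policies:
      - name: system-1
        package:
          name: system
```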
+ * If you are using {{ml}} {{dfeeds}} that contain discontinued search or query domain specific language (DSL), the upgrade will fail. In 5.6.5 and later, the Upgrade Assistant provides information about which {{dfeeds}} need to be updated. :::: diff --git a/serverless/pages/action-connectors.asciidoc b/serverless/pages/action-connectors.asciidoc index 8d523714b1..9b699bfa98 100644 --- a/serverless/pages/action-connectors.asciidoc +++ b/serverless/pages/action-connectors.asciidoc @@ -47,7 +47,7 @@ Actions are instantiations of a connector that are linked to rules and run as ba //// //// -/* {kib} provides the following types of connectors for use with {alert-features} : +/* {kib} provides the following types of connectors for use with {{alert-features}} : - [D3 Security]({kibana-ref}/d3security-action-type.html) - [Email]({kibana-ref}/email-action-type.html) diff --git a/serverless/pages/api-keys.asciidoc b/serverless/pages/api-keys.asciidoc index d3a1519db0..f29ca6f3dc 100644 --- a/serverless/pages/api-keys.asciidoc +++ b/serverless/pages/api-keys.asciidoc @@ -9,7 +9,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} API keys are security mechanisms used to authenticate and authorize access to {stack} resources, and ensure that only authorized users or applications are able to interact with the {stack}. -For example, if you extract data from an {es} cluster on a daily basis, you might create an API key tied to your credentials, configure it with minimum access, and then put the API credentials into a cron job. +For example, if you extract data from an {{es}} cluster on a daily basis, you might create an API key tied to your credentials, configure it with minimum access, and then put the API credentials into a cron job. Or, you might create API keys to automate ingestion of new data from remote sources, without a live user interaction. You can manage your keys in **{project-settings} → {manage-app} → {api-keys-app}**: @@ -44,7 +44,7 @@ In **{api-keys-app}**, click **Create API key**: [role="screenshot"] image::images/create-personal-api-key.png["Create API key UI"] -Once created, you can copy the encoded API key and use it to send requests to the {es} HTTP API. For example: +Once created, you can copy the encoded API key and use it to send requests to the {{es}} HTTP API. For example: [source,bash] ---- diff --git a/serverless/pages/apis-elasticsearch-conventions.asciidoc b/serverless/pages/apis-elasticsearch-conventions.asciidoc index 4681854faf..7d733b77a9 100644 --- a/serverless/pages/apis-elasticsearch-conventions.asciidoc +++ b/serverless/pages/apis-elasticsearch-conventions.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-api-conventions]] -= {es} API conventions += {{es}} API conventions // :description: The {es-serverless} REST APIs have conventions for headers and request bodies. // :keywords: serverless, elasticsearch, API, reference -You can run {es} API requests in **{dev-tools-app} → Console**. +You can run {{es}} API requests in **{dev-tools-app} → Console**. For example: [source,shell] @@ -18,8 +18,8 @@ Check out <>. [[elasticsearch-api-conventions-request-headers]] == Request headers -When you call {es} APIs outside of the Console, you must provide a request header. -The {es} APIs support the `Authorization`, `Content-Type`, and `X-Opaque-Id` headers. +When you call {{es}} APIs outside of the Console, you must provide a request header. +The {{es}} APIs support the `Authorization`, `Content-Type`, and `X-Opaque-Id` headers. 
[discrete] [[elasticsearch-api-conventions-authorization]] @@ -35,7 +35,7 @@ curl -X GET "${ES_URL}/_cat/indices?v=true" \ -H "Authorization: ApiKey ${API_KEY}" ---- -To get API keys for the {es} endpoint (`${ES_URL}`) for a project, refer to <>. +To get API keys for the {{es}} endpoint (`${ES_URL}`) for a project, refer to <>. [discrete] [[elasticsearch-api-conventions-content-type]] @@ -79,7 +79,7 @@ Because it's used only for traces, you can safely generate a unique `traceparent {es} APIs surface the header's `trace-id` value as `trace.id` in the: -* JSON {es} server logs +* JSON {{es}} server logs * Slow logs * Deprecation logs @@ -92,7 +92,7 @@ For example, a `traceparent` value of `00-0af7651916cd43dd8448eb211c80319c-b7ad6 [[elasticsearch-api-conventions-x-opaque-id]] === X-Opaque-Id -You can pass an `X-Opaque-Id` HTTP header to track the origin of a request in {es} logs and tasks. +You can pass an `X-Opaque-Id` HTTP header to track the origin of a request in {{es}} logs and tasks. For example: [source,bash] @@ -126,7 +126,7 @@ curl -X GET "${ES_URL}/_search?pretty" \ */ //// -For the deprecation logs, {es} also uses the `X-Opaque-Id` value to throttle and deduplicate deprecation warnings. +For the deprecation logs, {{es}} also uses the `X-Opaque-Id` value to throttle and deduplicate deprecation warnings. //// /* MISSING LINKS @@ -137,16 +137,16 @@ See \<\<_deprecation_logs_throttling>>. The `X-Opaque-Id` header accepts any arbitrary value. However, it is recommended that you limit these values to a finite set, such as an ID per client. Don't generate a unique `X-Opaque-Id` header for every request. -Too many unique `X-Opaque-Id` values can prevent {es} from deduplicating warnings in the deprecation logs. +Too many unique `X-Opaque-Id` values can prevent {{es}} from deduplicating warnings in the deprecation logs. [discrete] [[elasticsearch-api-conventions-request-bodies]] == Request bodies -A number of {es} APIs with GET operations--most notably the search API--support a request body. +A number of {{es}} APIs with GET operations--most notably the search API--support a request body. While the GET operation makes sense in the context of retrieving information, GET requests with a body are not supported by all HTTP libraries. -All {es} APIs with GET operations that require a body can also be submitted as POST requests. +All {{es}} APIs with GET operations that require a body can also be submitted as POST requests. Alternatively, you can pass the request body as the `source` query string parameter when using GET. When you use this method, the `source_content_type` parameter should also be passed with a media type value that indicates the format of the source, such as `application/json`. @@ -157,7 +157,7 @@ Error: no handler found for uri [.../_search?pretty=true] and method [GET]" ## Date math -Most {es} APIs that accept an index or index alias argument support date math. +Most {{es}} APIs that accept an index or index alias argument support date math. Date math name resolution enables you to search a range of time series indices or index aliases rather than searching all of your indices and filtering the results. Limiting the number of searched indices reduces cluster load and improves search performance. For example, if you are searching for errors in your daily logs, you can use a date math name template to restrict the search to the past two days. 
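For example, a hedged sketch of such a request, where `my-index` is a placeholder and `${ES_URL}` and `${API_KEY}` are assumed from the earlier examples. Note that date math characters must be URI-encoded, so `<my-index-{now/d}>` becomes `%3Cmy-index-%7Bnow%2Fd%7D%3E`:

[source,bash]
----
# <my-index-{now/d}> resolves to the current day's index,
# so only that index is searched rather than all my-index-* indices.
curl -X GET "${ES_URL}/%3Cmy-index-%7Bnow%2Fd%7D%3E/_search?pretty" \
  -H "Authorization: ApiKey ${API_KEY}"
----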
diff --git a/serverless/pages/apis-http-apis.asciidoc b/serverless/pages/apis-http-apis.asciidoc index 8de2a62aed..37085560e4 100644 --- a/serverless/pages/apis-http-apis.asciidoc +++ b/serverless/pages/apis-http-apis.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-http-apis]] = REST APIs -// :description: {es} and {kib} expose REST APIs that can be called directly to configure and access {stack} features. +// :description: {{es}} and {kib} expose REST APIs that can be called directly to configure and access {stack} features. // :keywords: serverless, elasticsearch, http, rest, overview [discrete] diff --git a/serverless/pages/clients-dot-net-getting-started.asciidoc b/serverless/pages/clients-dot-net-getting-started.asciidoc index 1f9ab78819..66ecf48c27 100644 --- a/serverless/pages/clients-dot-net-getting-started.asciidoc +++ b/serverless/pages/clients-dot-net-getting-started.asciidoc @@ -34,14 +34,14 @@ dotnet add package Elastic.Clients.Elasticsearch.Serverless [[elasticsearch-dot-net-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,net] ---- var client = new ElasticsearchClient("", new ApiKey("")); ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>. [discrete] [[elasticsearch-dot-net-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-go-getting-started.asciidoc b/serverless/pages/clients-go-getting-started.asciidoc index f3fd0c8f39..ae0e0c0574 100644 --- a/serverless/pages/clients-go-getting-started.asciidoc +++ b/serverless/pages/clients-go-getting-started.asciidoc @@ -4,7 +4,7 @@ // :description: Set up and use the Go client. // :keywords: serverless, elasticsearch, go, how to -This page guides you through the installation process of the {es} Go +This page guides you through the installation process of the {{es}} Go client, shows you how to initialize the client, and how to perform basic {es} operations with it. @@ -55,7 +55,7 @@ import ( [[elasticsearch-go-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,go] ---- @@ -68,7 +68,7 @@ if err != nil { } ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>. [discrete] [[elasticsearch-go-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-java-getting-started.asciidoc b/serverless/pages/clients-java-getting-started.asciidoc index b3e8dcdf65..9768c9ea50 100644 --- a/serverless/pages/clients-java-getting-started.asciidoc +++ b/serverless/pages/clients-java-getting-started.asciidoc @@ -21,7 +21,7 @@ The same client is used for {es3}, on-premise and managed Elasticsearch. Some AP * Java 8 or later. * A JSON object mapping library to allow seamless integration of -your application classes with the {es} API. The examples below +your application classes with the {{es}} API. The examples below show usage with Jackson. 
[discrete] @@ -79,7 +79,7 @@ the following to the `pom.xml` of your project: [[elasticsearch-java-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,java] ---- @@ -103,7 +103,7 @@ ElasticsearchTransport transport = new RestClientTransport( ElasticsearchClient esClient = new ElasticsearchClient(transport); ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>. [discrete] [[elasticsearch-java-client-getting-started-using-the-api]] @@ -157,7 +157,7 @@ SearchResponse response = esClient.search(s -> s A few things to note in the above example: * The search query is built using a hierarchy of lambda expressions that closely -follows the {es} HTTP API. Lambda expressions allows you to be guided +follows the {{es}} HTTP API. Lambda expressions allow you to be guided by your IDE's autocompletion, without having to import (or even know!) the actual classes representing a query. * The last parameter `Product.class` instructs the client to return results as diff --git a/serverless/pages/clients-nodejs-getting-started.asciidoc b/serverless/pages/clients-nodejs-getting-started.asciidoc index d4c57d03c4..7a64ab1c64 100644 --- a/serverless/pages/clients-nodejs-getting-started.asciidoc +++ b/serverless/pages/clients-nodejs-getting-started.asciidoc @@ -39,7 +39,7 @@ npm install @elastic/elasticsearch-serverless [[elasticsearch-nodejs-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,js] ---- diff --git a/serverless/pages/clients-php-getting-started.asciidoc b/serverless/pages/clients-php-getting-started.asciidoc index 87522d283f..78b36b150b 100644 --- a/serverless/pages/clients-php-getting-started.asciidoc +++ b/serverless/pages/clients-php-getting-started.asciidoc @@ -39,7 +39,7 @@ composer require elastic/elasticsearch-serverless [[elasticsearch-php-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,php] ---- @@ -53,7 +53,7 @@ $client = ClientBuilder::create() ->build(); ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>. [discrete] [[elasticsearch-php-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-python-getting-started.asciidoc b/serverless/pages/clients-python-getting-started.asciidoc index c00fdce2aa..15c4258193 100644 --- a/serverless/pages/clients-python-getting-started.asciidoc +++ b/serverless/pages/clients-python-getting-started.asciidoc @@ -50,7 +50,7 @@ python -m pip install elasticsearch [[elasticsearch-python-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,python] ---- @@ -62,7 +62,7 @@ client = Elasticsearch( ) ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>.
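Once initialized, a quick round trip verifies the connection; this sketch assumes the `client` created above, and `books` is an illustrative index name:

[source,python]
----
# Index one document, then search for it (a new document becomes
# searchable once the index refreshes).
client.index(index="books", document={"title": "Snow Crash", "author": "Neal Stephenson"})

resp = client.search(index="books", query={"match": {"title": "snow"}})
for hit in resp["hits"]["hits"]:
    print(hit["_source"])
----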
[discrete] [[elasticsearch-python-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-ruby-getting-started.asciidoc b/serverless/pages/clients-ruby-getting-started.asciidoc index f71553923c..0ef6da9eb1 100644 --- a/serverless/pages/clients-ruby-getting-started.asciidoc +++ b/serverless/pages/clients-ruby-getting-started.asciidoc @@ -18,7 +18,7 @@ client for {es3}, shows you how to initialize the client, and how to perform bas == Requirements * Ruby 3.0 or higher installed on your system. -* To use the `elasticsearch-serverless` gem, you must have an API key and {es} endpoint for an {es3} project. +* To use the `elasticsearch-serverless` gem, you must have an API key and {{es}} endpoint for an {es3} project. [discrete] [[elasticsearch-ruby-client-getting-started-installation]] @@ -90,7 +90,7 @@ bundle exec rake console [[elasticsearch-ruby-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {es} endpoint: +Initialize the client using your API key and {{es}} endpoint: [source,ruby] ---- @@ -100,7 +100,7 @@ client = ElasticsearchServerless::Client.new( ) ---- -To get API keys for the {es} endpoint for a project, see <>. +To get API keys for the {{es}} endpoint for a project, see <>. [discrete] [[elasticsearch-ruby-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients.asciidoc b/serverless/pages/clients.asciidoc index eb747d853a..55fecdfc5f 100644 --- a/serverless/pages/clients.asciidoc +++ b/serverless/pages/clients.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-clients]] = Client libraries -// :description: Index, search, and manage {es} data in your preferred language. +// :description: Index, search, and manage {{es}} data in your preferred language. // :keywords: serverless, elasticsearch, clients, overview You can use the following language clients with {es-serverless}: diff --git a/serverless/pages/cloud-regions.asciidoc b/serverless/pages/cloud-regions.asciidoc index ec64f7dfa8..d2bec3a22a 100644 --- a/serverless/pages/cloud-regions.asciidoc +++ b/serverless/pages/cloud-regions.asciidoc @@ -1,7 +1,7 @@ [[regions]] = Serverless regions -// :description: Index, search, and manage {es} data in your preferred language. +// :description: Index, search, and manage {{es}} data in your preferred language. // :keywords: serverless, regions, aws, azure, cloud A region is the geographic area where the data center of the cloud provider that hosts your project is located. Review the available Elastic Cloud Serverless regions to decide which region to use. If you aren't sure which region to pick, choose one that is geographically close to you to reduce latency. diff --git a/serverless/pages/connecting-to-es-endpoint.asciidoc b/serverless/pages/connecting-to-es-endpoint.asciidoc index 3474623637..20faa531ae 100644 --- a/serverless/pages/connecting-to-es-endpoint.asciidoc +++ b/serverless/pages/connecting-to-es-endpoint.asciidoc @@ -17,7 +17,7 @@ To connect to your Elasticsearch instance from your applications, client librari [[elasticsearch-get-started-create-api-key]] == Create a new API key -Create an API key to authenticate your requests to the {es} APIs. You'll need an API key for all API requests and client connections. +Create an API key to authenticate your requests to the {{es}} APIs. You'll need an API key for all API requests and client connections. To create a new API key: @@ -40,10 +40,10 @@ You can't recover or retrieve a lost API key. 
Instead, you must delete the key a [discrete] [[elasticsearch-get-started-endpoint]] -== Get your {es} endpoint URL +== Get your {{es}} endpoint URL -The endpoint URL is the address for your {es} instance. -You'll use this URL together with your API key to make requests to the {es} APIs. +The endpoint URL is the address for your {{es}} instance. +You'll use this URL together with your API key to make requests to the {{es}} APIs. To find the endpoint URL: diff --git a/serverless/pages/custom-roles.asciidoc b/serverless/pages/custom-roles.asciidoc index 4bd795f0ac..2301903456 100644 --- a/serverless/pages/custom-roles.asciidoc +++ b/serverless/pages/custom-roles.asciidoc @@ -34,7 +34,7 @@ image::images/custom-roles-ui.png[Custom Roles app] // TO-DO: This screenshot needs to be refreshed and automated. Roles are a collection of privileges that enable users to access project features and data. -For example, when you create a custom role, you can assign {es} cluster and index privileges and {kib} privileges. +For example, when you create a custom role, you can assign {{es}} cluster and index privileges and {kib} privileges. [NOTE] ==== @@ -43,13 +43,13 @@ You cannot assign {ref}/security-privileges.html#_run_as_privilege[run as privil [discrete] [[custom-roles-es-cluster-privileges]] -== {es} cluster privileges +== {{es}} cluster privileges Cluster privileges grant access to monitoring and management features in {es}. They also enable some {stack-manage-app} capabilities in your project. [role="screenshot"] -image::images/custom-roles-cluster-privileges.png[Create a custom role and define {es} cluster privileges] +image::images/custom-roles-cluster-privileges.png[Create a custom role and define {{es}} cluster privileges] // TO-DO: This screenshot needs to be refreshed and automated. @@ -57,22 +57,22 @@ Refer to {ref}/security-privileges.html#privileges-list-cluster[cluster privileg [discrete] [[custom-roles-es-index-privileges]] -== {es} index privileges +== {{es}} index privileges Each role can grant access to multiple data indices, and each index can have a different set of privileges. Typically, you will grant the `read` and `view_index_metadata` privileges to each index that you expect your users to work with. For example, grant access to indices that match an `acme-marketing-*` pattern: [role="screenshot"] -image::images/custom-roles-index-privileges.png[Create a custom role and define {es} index privileges] +image::images/custom-roles-index-privileges.png[Create a custom role and define {{es}} index privileges] // TO-DO: This screenshot needs to be refreshed and automated. Refer to {ref}/security-privileges.html#privileges-list-indices[index privileges] for a complete description of available options. Document-level and field-level security affords you even more granularity when it comes to granting access to your data. -With document-level security (DLS), you can write an {es} query to describe which documents this role grants access to. -With field-level security (FLS), you can instruct {es} to grant or deny access to specific fields within each document. +With document-level security (DLS), you can write an {{es}} query to describe which documents this role grants access to. +With field-level security (FLS), you can instruct {{es}} to grant or deny access to specific fields within each document. 
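As a sketch of how index privileges, DLS, and FLS fit together in one role definition, assuming the {{es}} role API is available to your project and using illustrative role, query, and field names, the `acme-marketing-*` example above could be created programmatically like this:

[source,python]
----
import os

from elasticsearch import Elasticsearch

client = Elasticsearch(os.environ["ES_URL"], api_key=os.environ["API_KEY"])

# Read access to acme-marketing-* indices, restricted by a DLS query
# (EMEA documents only) and FLS grants (only two fields are visible).
client.security.put_role(
    name="marketing-emea-reader",
    indices=[
        {
            "names": ["acme-marketing-*"],
            "privileges": ["read", "view_index_metadata"],
            "query": {"term": {"region": "emea"}},                # document-level security
            "field_security": {"grant": ["campaign", "budget"]},  # field-level security
        }
    ],
)
----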
// Derived from https://www.elastic.co/guide/en/kibana/current/kibana-role-management.html#adding_cluster_privileges diff --git a/serverless/pages/data-views.asciidoc b/serverless/pages/data-views.asciidoc index 3a0d613946..3098b124aa 100644 --- a/serverless/pages/data-views.asciidoc +++ b/serverless/pages/data-views.asciidoc @@ -1,13 +1,13 @@ [[data-views]] = {data-sources-cap} -// :description: Elastic requires a {data-source} to access the {es} data that you want to explore. +// :description: Elastic requires a {{data-source}} to access the {{es}} data that you want to explore. // :keywords: serverless, Elasticsearch, Observability, Security This content applies to: {es-badge} {obs-badge} {sec-badge} -A {data-source} can point to one or more indices, {ref}/data-streams.html[data streams], or {ref}/alias.html[index aliases]. -For example, a {data-source} can point to your log data from yesterday or all indices that contain your data. +A {{data-source}} can point to one or more indices, {ref}/data-streams.html[data streams], or {ref}/alias.html[index aliases]. +For example, a {{data-source}} can point to your log data from yesterday or all indices that contain your data. //// /* @@ -17,7 +17,7 @@ For example, a {data-source} can point to your log data from yesterday or all in * Access to **Data Views** requires the {kib} privilege `Data View Management`. -* To create a {data-source}, you must have the {es} privilege +* To create a {{data-source}}, you must have the {{es}} privilege `view_index_metadata`. * If a read-only indicator appears, you have insufficient privileges @@ -33,15 +33,15 @@ For example, a {data-source} can point to your log data from yesterday or all in After you've loaded your data, follow these steps to create a {data-source}: -// +// . Go to **{project-settings} → {manage-app} → {data-views-app}**. Alternatively, go to **Discover** and open the data view menu. + [role="screenshot"] -image:images/discover-find-data-view.png[How to set the {data-source} in Discover] +image:images/discover-find-data-view.png[How to set the {{data-source}} in Discover] + . Click **Create a {data-source}**. -. Give your {data-source} a name. +. Give your {{data-source}} a name. . Start typing in the **Index pattern** field, and Elastic looks for the names of indices, data streams, and aliases that match your input. You can view all available sources or only the sources that the data view targets. @@ -65,8 +65,8 @@ based on different timestamps. . Click **Show advanced settings** to: + ** Display hidden and system indices. -** Specify your own {data-source} name. For example, enter your {es} index alias name. -. Click **Save {data-source} to Elastic**. +** Specify your own {{data-source}} name. For example, enter your {{es}} index alias name. +. Click **Save {{data-source}} to Elastic**. You can manage your data views in **{project-settings} → {manage-app} → {data-views-app}**. @@ -76,10 +76,10 @@ You can manage your data views in **{project-settings} → {manage-app} → {dat Want to explore your data or create a visualization without saving it as a data view? Select **Use without saving** in the **Create {data-source}** form in **Discover**. -With a temporary {data-source}, you can add fields and create an {es} query alert, just like you would a regular {data-source}. +With a temporary {{data-source}}, you can add fields and create an {{es}} query alert, just like you would a regular {{data-source}}. Your work won't be visible to others in your space.
-A temporary {data-source} remains in your space until you change apps, or until you save it. +A temporary {{data-source}} remains in your space until you change apps, or until you save it. // ![how to create an ad-hoc data view](https://images.contentstack.io/v3/assets/bltefdd0b53724fa2ce/blte3a4f3994c44c0cc/637eb0c95834861044c21a25/ad-hoc-data-view.gif) @@ -94,7 +94,7 @@ A temporary {data-source} remains in your space until you change apps, or until ### Use {data-sources} with rolled up data -A {data-source} can match one rollup index. For a combination rollup +A {{data-source}} can match one rollup index. For a combination rollup {data-source} with both raw and rolled up data, use the standard notation: ```ts @@ -108,8 +108,8 @@ For an example, refer to : Debug your searches using various {es} APIs. +// - : Debug your searches using various {{es}} APIs. diff --git a/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc b/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc index 6f24c54372..35a816b786 100644 --- a/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc @@ -27,7 +27,7 @@ allocation. Since eland uses APIs to deploy the models, you cannot see the models in {kib} until the saved objects are synchronized. You can follow the prompts in {kib}, wait for automatic synchronization, or use the -{kibana-ref}/machine-learning-api-sync.html[sync {ml} saved objects API]. +{kibana-ref}/machine-learning-api-sync.html[sync {{ml}} saved objects API]. ==== When you deploy the model, its allocations are distributed across available {ml} @@ -39,8 +39,8 @@ Throughput can be scaled by adding more allocations to the deployment; it increases the number of {infer} requests that can be performed in parallel. All allocations assigned to a node share the same copy of the model in memory. The model is loaded into memory in a native process that encapsulates `libtorch`, -which is the underlying {ml} library of PyTorch. The number of allocations -setting affects the amount of model allocations across all the {ml} nodes. Model +which is the underlying {{ml}} library of PyTorch. The number of allocations +setting affects the number of model allocations across all the {{ml}} nodes. Model allocations are distributed in such a way that the total number of used threads does not exceed the allocated processors of a node. diff --git a/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc b/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc index 32a026f542..8e25a652c9 100644 --- a/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc @@ -32,7 +32,7 @@ for semantic search or the trial period activated. == Benchmarks The following sections provide information about how ELSER performs on different -hardwares and compares the model performance to {es} BM25 and other strong +hardware and compare the model performance to {{es}} BM25 and other strong baselines such as Splade or OpenAI. [discrete] @@ -84,7 +84,7 @@ Discounted Cumulative Gain (NDCG) which can handle multiple relevant documents and fine-grained document ratings. The metric is applied to a fixed-sized list of retrieved documents which, in this case, is the top 10 documents (NDCG@10).
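For reference, NDCG@10 itself is straightforward to compute; this is a small sketch of the standard exponential-gain formulation, with made-up ratings rather than ELSER results:

[source,python]
----
import math

def ndcg_at_k(retrieved, judged, k=10):
    # retrieved: graded ratings of the returned documents in rank order.
    # judged: all graded ratings for the query, used to build the ideal ranking.
    def dcg(ratings):
        return sum((2**r - 1) / math.log2(i + 2) for i, r in enumerate(ratings[:k]))

    ideal = dcg(sorted(judged, reverse=True))
    return dcg(retrieved) / ideal if ideal else 0.0

print(ndcg_at_k([3, 2, 0, 1], [3, 3, 2, 1, 0]))  # ~0.70
----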
-The table below shows the performance of ELSER compared to {es} BM25 with an +The table below shows the performance of ELSER compared to {{es}} BM25 with an English analyzer broken down by the 12 data sets used for the evaluation. ELSER has 10 wins, 1 draw, 1 loss and an average improvement in NDCG@10 of 17%. diff --git a/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc b/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc index 2fc9bba428..dcffca6cf8 100644 --- a/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc @@ -19,7 +19,7 @@ Trained models must be in a TorchScript representation for use with {stack-ml-features}. ==== -https://github.com/elastic/eland[Eland] is an {es} Python client that +https://github.com/elastic/eland[Eland] is an {{es}} Python client that provides a simple script to perform the conversion of Hugging Face transformer models to their TorchScript representations, the chunking process, and upload to {es}; it is therefore the recommended import method. You can either install @@ -40,7 +40,7 @@ python -m pip install 'eland[pytorch]' + // NOTCONSOLE . Run the `eland_import_hub_model` script to download the model from Hugging -Face, convert it to TorchScript format, and upload to the {es} cluster. +Face, convert it to TorchScript format, and upload to the {{es}} cluster. For example: + // NOTCONSOLE @@ -100,7 +100,7 @@ docker run -it --rm elastic/eland \ --start ---- -Replace the `$ELASTICSEARCH_URL` with the URL for your {es} cluster. Refer to +Replace the `$ELASTICSEARCH_URL` with the URL for your {{es}} cluster. Refer to https://www.elastic.co/docs/current/serverless/elasticsearch/explore-your-data-ml-nlp/deploy-trained-models/import-model[Authentication methods] to learn more. diff --git a/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc b/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc index 5c38de51ab..e0f1e73287 100644 --- a/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc @@ -71,13 +71,13 @@ for calculating the similarity between the embeddings they produce. Examples of typical scoring functions are: `cosine`, `dot product` and `euclidean distance` (also known as `l2_norm`). -The embeddings produced by these models should be indexed in {es} using the +The embeddings produced by these models should be indexed in {{es}} using the {ref}/dense-vector.html[dense vector field type] with an appropriate {ref}/dense-vector.html#dense-vector-params[similarity function] chosen for the model. -To find similar embeddings in {es} use the efficient +To find similar embeddings in {{es}} use the efficient {ref}/knn-search.html#approximate-knn[Approximate k-nearest neighbor (kNN)] search API with a text embedding as the query vector. Approximate kNN search uses the similarity function defined in the dense vector field mapping is used @@ -144,7 +144,7 @@ Using `DPREncoderWrapper`: == Expected model output Models used for each NLP task type must output tensors of a specific format to -be used in the {es} NLP pipelines. +be used in the {{es}} NLP pipelines. Here are the expected outputs for each task type. 
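As an end-to-end sketch of the dense vector indexing and approximate kNN search described above, reusing the `client` from the earlier sketches: the index name and dimension count are illustrative, and `query_embedding` is a hypothetical stand-in for a vector produced by your deployed model.

[source,python]
----
# The mapping's dims and similarity must match the embedding model.
client.indices.create(
    index="embeddings-demo",
    mappings={
        "properties": {
            "text_embedding": {
                "type": "dense_vector",
                "dims": 384,             # illustrative; use your model's embedding size
                "similarity": "cosine",  # must match the model's scoring function
            }
        }
    },
)

# Approximate kNN search with a text embedding as the query vector.
query_embedding = [...]  # hypothetical: produced by the deployed text embedding model
response = client.search(
    index="embeddings-demo",
    knn={
        "field": "text_embedding",
        "query_vector": query_embedding,
        "k": 10,
        "num_candidates": 100,
    },
)
----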
diff --git a/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc b/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc index 03cd5ce39a..c6c0bf64be 100644 --- a/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc @@ -69,7 +69,7 @@ message is displayed at the top of the page that says _"ML job and trained model synchronization required"_. Follow the link to _"Synchronize your jobs and trained models."_ Then click **Synchronize**. You can also wait for the automatic synchronization that occurs in every hour, or -use the {kibana-ref}/ml-sync.html[sync {ml} objects API]. +use the {kibana-ref}/ml-sync.html[sync {{ml}} objects API]. [discrete] [[test-the-ner-model]] diff --git a/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc b/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc index 97730070a7..3c3fe71874 100644 --- a/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc @@ -14,7 +14,7 @@ unstructured text or compare different pieces of text. == Text embedding Text embedding is a task which produces a mathematical representation of text -called an embedding. The {ml} model turns the text into an array of numerical +called an embedding. The {{ml}} model turns the text into an array of numerical values (also known as a _vector_). Pieces of content with similar meaning have similar representations. This means it is possible to determine whether different pieces of text are either semantically similar, different, or even diff --git a/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc b/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc index 4cd1b725bd..2bc29bf07d 100644 --- a/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc @@ -4,7 +4,7 @@ // :keywords: serverless, elasticsearch, tbd When the model is deployed on at least one node in the cluster, you can begin to -perform inference. _{infer-cap}_ is a {ml} feature that enables you to use +perform inference. _{infer-cap}_ is a {{ml}} feature that enables you to use your trained models to perform NLP tasks (such as text extraction, classification, or embeddings) on incoming data. @@ -13,7 +13,7 @@ The simplest method to test your model against new data is to use the field of an existing index in your cluster to test the model: [role="screenshot"] -image::images/ml-nlp-test-ner.png[Testing a sentence with two named entities against a NER trained model in the {ml} app] +image::images/ml-nlp-test-ner.png[Testing a sentence with two named entities against a NER trained model in the {{ml}} app] Alternatively, you can use the {ref}/infer-trained-model.html[infer trained model API]. diff --git a/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc b/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc index a581f8df33..fe4e8052c7 100644 --- a/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc @@ -76,7 +76,7 @@ message is displayed at the top of the page that says _"ML job and trained model synchronization required"_. Follow the link to _"Synchronize your jobs and trained models."_ Then click **Synchronize**. 
You can also wait for the automatic synchronization that occurs in every hour, or -use the {kibana-ref}/ml-sync.html[sync {ml} objects API]. +use the {kibana-ref}/ml-sync.html[sync {{ml}} objects API]. [discrete] [[test-the-text-embedding-model]] diff --git a/serverless/pages/explore-your-data-ml-nlp.asciidoc b/serverless/pages/explore-your-data-ml-nlp.asciidoc index 8fe9e9476d..51550c9a5d 100644 --- a/serverless/pages/explore-your-data-ml-nlp.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp.asciidoc @@ -6,7 +6,7 @@ natural language in spoken word or written text. Classically, NLP was performed using linguistic rules, dictionaries, regular -expressions, and {ml} for specific tasks such as automatic categorization or +expressions, and {{ml}} for specific tasks such as automatic categorization or summarization of text. In recent years, however, deep learning techniques have taken over much of the NLP landscape. Deep learning capitalizes on the availability of large scale data sets, cheap computation, and techniques for @@ -14,7 +14,7 @@ learning at scale with less human involvement. Pre-trained language models that use a transformer architecture have been particularly successful. For example, BERT is a pre-trained language model that was released by Google in 2018. Since that time, it has become the inspiration for most of today’s modern NLP -techniques. The {stack} {ml} features are structured around BERT and +techniques. The {stack} {{ml}} features are structured around BERT and transformer models. These features support BERT’s tokenization scheme (called WordPiece) and transformer models that conform to the standard BERT model interface. For the current list of supported architectures, refer to @@ -22,7 +22,7 @@ interface. For the current list of supported architectures, refer to To incorporate transformer models and make predictions, {es-serverless} uses libtorch, which is an underlying native library for PyTorch. Trained models must be in a -TorchScript representation for use with {stack} {ml} features. +TorchScript representation for use with {stack} {{ml}} features. You can perform the following NLP operations: diff --git a/serverless/pages/explore-your-data.asciidoc b/serverless/pages/explore-your-data.asciidoc index ea4edc9aa5..f1ea853979 100644 --- a/serverless/pages/explore-your-data.asciidoc +++ b/serverless/pages/explore-your-data.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-explore-your-data]] = Explore your data -// :description: Turn {es} data into actionable insights with aggregations, visualizations, and alerts +// :description: Turn {{es}} data into actionable insights with aggregations, visualizations, and alerts // :keywords: serverless, elasticsearch, explore, overview In addition to search, {es3} offers several options for analyzing and visualizing your data. @@ -9,7 +9,7 @@ In addition to search, {es3} offers several options for analyzing and visualizin [NOTE] ==== These features are available on all Elastic deployment types: self-managed clusters, Elastic Cloud Hosted deployments, and {es-serverless} projects. -They are documented in the {es} and {kib} core documentation. +They are documented in the {{es}} and {kib} core documentation. 
==== [discrete] diff --git a/serverless/pages/get-started.asciidoc b/serverless/pages/get-started.asciidoc index dab32e2df0..1a0be83ffa 100644 --- a/serverless/pages/get-started.asciidoc +++ b/serverless/pages/get-started.asciidoc @@ -22,11 +22,11 @@ On this page, you will learn how to: [[elasticsearch-get-started-create-project]] == Create an {es-serverless} project -Use your {ecloud} account to create a fully-managed {es} project: +Use your {ecloud} account to create a fully-managed {{es}} project: . Navigate to {ess-console}[cloud.elastic.co] and create a new account or log in to your existing account. . Within **Serverless Projects**, choose **Create project**. -. Choose the {es} project type. +. Choose the {{es}} project type. . Select a **configuration** for your project, based on your use case. + ** **General purpose**: For general search use cases across various data types. @@ -98,5 +98,5 @@ If you're already familiar with Elasticsearch, you can jump right into setting u [[elasticsearch-next-steps]] == Next steps -* Once you've added data to your {es-serverless} project, you can use {kibana-ref}/playground.html[Playground] to test and tweak {es} queries and chat with your data, using GenAI. -* You can also try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {es} documentation. \ No newline at end of file +* Once you've added data to your {es-serverless} project, you can use {kibana-ref}/playground.html[Playground] to test and tweak {{es}} queries and chat with your data, using GenAI. +* You can also try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {{es}} documentation. \ No newline at end of file diff --git a/serverless/pages/index-management.asciidoc b/serverless/pages/index-management.asciidoc index 99a3fe00fd..1660199b60 100644 --- a/serverless/pages/index-management.asciidoc +++ b/serverless/pages/index-management.asciidoc @@ -30,7 +30,7 @@ TBD: Are these RBAC requirements valid for serverless? ## Required permissions -If you use {es} {security-features}, the following security privileges are required: +If you use {{es}} {{security-features}}, the following security privileges are required: * The `monitor` cluster privilege to access Elastic's **{index-manage-app}** features. * The `view_index_metadata` and `manage` index privileges to view a data stream diff --git a/serverless/pages/ingest-pipelines.asciidoc b/serverless/pages/ingest-pipelines.asciidoc index da743d499d..c5490862be 100644 --- a/serverless/pages/ingest-pipelines.asciidoc +++ b/serverless/pages/ingest-pipelines.asciidoc @@ -11,7 +11,7 @@ For example, you can use pipelines to remove fields, extract values from text, a A pipeline consists of a series of configurable tasks called processors. Each processor runs sequentially, making specific changes to incoming documents. -After the processors have run, {es} adds the transformed documents to your data stream or index. +After the processors have run, {{es}} adds the transformed documents to your data stream or index. //// /* @@ -19,7 +19,7 @@ TBD: Do these requirements apply in serverless? ## Prerequisites - Nodes with the ingest node role handle pipeline processing. To use ingest pipelines, your cluster must have at least one node with the ingest role. For heavy ingest loads, we recommend creating dedicated ingest nodes. -- If the {es} security features are enabled, you must have the manage_pipeline cluster privilege to manage ingest pipelines. 
To use Kibana’s Ingest Pipelines feature, you also need the cluster:monitor/nodes/info cluster privileges. +- If the {{es}} security features are enabled, you must have the manage_pipeline cluster privilege to manage ingest pipelines. To use Kibana’s Ingest Pipelines feature, you also need the cluster:monitor/nodes/info cluster privileges. - Pipelines including the enrich processor require additional setup. See Enrich your data. */ //// diff --git a/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc index 78a14b9000..febb39a365 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-ingest-data-through-api]] = Ingest data through API -// :description: Add data to {es} using HTTP APIs or a language client. +// :description: Add data to {{es}} using HTTP APIs or a language client. // :keywords: serverless, elasticsearch, ingest, api, how to -The {es} APIs enable you to ingest data through code. +The {{es}} APIs enable you to ingest data through code. You can use the APIs of one of the <> or the {es} HTTP APIs. The examples diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc index 15d2b8c9f9..76d04a77b4 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc @@ -30,7 +30,7 @@ Depending on what data you want to collect, you may need to install multiple shi | https://www.elastic.co/products/beats/winlogbeat[Winlogbeat] |=== -{beats} can send data to {es} directly or through {ls}, where you +{beats} can send data to {{es}} directly or through {ls}, where you can further process and enhance the data before visualizing it in {kib}. .Authenticating with {es} diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc index 78b6997c05..01740eb81f 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc @@ -113,7 +113,7 @@ You'll need to update these values in your https://github.com/elastic/connectors [[elasticsearch-ingest-data-through-integrations-connector-client-step-2-deploy-your-self-managed-connector]] == Step 2: Deploy your self-managed connector -To use connector clients, you must deploy the connector service so your connector can talk to your {es} instance. +To use connector clients, you must deploy the connector service so your connector can talk to your {{es}} instance. The source code is hosted in the `elastic/connectors` repository. You have two deployment options: @@ -125,8 +125,8 @@ You have two deployment options: ==== You'll need the following values handy to update your `config.yml` file: -* `elasticsearch.host`: Your {es} endpoint. Printed to the screen when you create a new connector. -* `elasticsearch.api_key`: Your {es} API key. You can create API keys by navigating to **Home**, and clicking **New** in the **API key** section. 
Once your connector is running, you'll be able to create a new API key that is limited to only access the connector's index. +* `elasticsearch.host`: Your {{es}} endpoint. Printed to the screen when you create a new connector. +* `elasticsearch.api_key`: Your {{es}} API key. You can create API keys by navigating to **Home**, and clicking **New** in the **API key** section. Once your connector is running, you'll be able to create a new API key that is limited to accessing only the connector's index. * `connector_id`: Unique id for your connector. Printed to the screen when you create a new connector. * `service_type`: Original data source type. Printed to the screen when you create a new connector. ==== @@ -251,7 +251,7 @@ make install make run ---- -The connector service should now be running in your terminal. If the connection to your {es} instance was successful, the **Configure your connector** step will be activated in the project's UI. +The connector service should now be running in your terminal. If the connection to your {{es}} instance was successful, the **Configure your connector** step will be activated in the project's UI. Here we're working locally. In a production setup, you'll deploy the connector service to your own infrastructure. @@ -280,7 +280,7 @@ For example, the Sharepoint Online connector requires the following details about Once you've entered the data source details, you need to connect to an index. This is the final step in your project's UI, before you can run a sync. -You can choose to sync to an existing {es} index, or create a new index for your connector. +You can choose to sync to an existing {{es}} index, or create a new index for your connector. You can also create an API key that is limited to only access your selected index. .Index name prefix @@ -296,7 +296,7 @@ When choosing an existing index for the connector to sync to, please ensure mapp ==== Once this step is completed, you're ready to run a sync. -When a sync is launched you'll start to see documents being added to your {es} index. +When a sync is launched you'll start to see documents being added to your {{es}} index. Learn https://github.com/elastic/connectors/blob/main/docs/DEVELOPING.md#syncing[how syncing works] in the `elastic/connectors` repo docs. diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc index 63640a7679..97c1564135 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc @@ -8,7 +8,7 @@ It supports a wide variety of data sources, and can dynamically unify data from disparate sources and normalize the data into destinations of your choice. {ls} can collect data using a variety of {ls} {logstash-ref}/input-plugins.html[input plugins], enrich and transform the data with {ls} {logstash-ref}/filter-plugins.html[filter plugins], -and output the data to {es} using the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{es} output plugin]. +and output the data to {{es}} using the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{{es}} output plugin]. You can use {ls} to extend <> for advanced use cases, such as data routed to multiple destinations or when you need to make your data persistent.
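To make the input → filter → output flow concrete, here is a minimal, hypothetical pipeline definition; the file path, added field, and environment variable names are illustrative, and the API key settings are covered in the sections that follow:

[source,txt]
----
input {
  file { path => "/var/log/app/*.log" }              # an input plugin reading local logs
}
filter {
  mutate { add_field => { "env" => "production" } }  # a filter plugin enriching each event
}
output {
  elasticsearch {
    hosts   => ["${ES_URL}"]                         # your project endpoint
    api_key => "${API_KEY}"                          # id:secret form; see the API keys section
  }
}
----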
@@ -22,10 +22,10 @@ Some capabilities and features for large, self-managed users aren't appropriate You'll use the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{es} output plugin] to send data to {es3}. Some differences to note between {es3} and self-managed {es}: -* Your logstash-output-elasticsearch configuration uses **API keys** to access {es} from {ls}. +* Your logstash-output-elasticsearch configuration uses **API keys** to access {{es}} from {ls}. User-based security settings are ignored and may cause errors. * {es3} uses **{dlm} ({dlm-init})** instead of {ilm} ({ilm-init}). -If you add {ilm-init} settings to your {es} output configuration, they are ignored and may cause errors. +If you add {ilm-init} settings to your {{es}} output configuration, they are ignored and may cause errors. * **{ls} monitoring** for {serverless-short} is available through the https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md[{ls} Integration] in <>. **Known issue** @@ -60,7 +60,7 @@ No additional SSL configuration steps are needed. == API keys for connecting {ls} to {es3} Use the **Security: API key** section in the UI to <> -for securely connecting the {ls} {es} output to {es3}. +for securely connecting the {ls} {{es}} output to {es3}. We recommend creating a unique API key per {ls} instance. You can create as many API keys as necessary. @@ -81,13 +81,13 @@ output { [discrete] [[elasticsearch-ingest-data-through-logstash-migrating-elasticsearch-data-using-ls]] -== Migrating {es} data using {ls} +== Migrating {{es}} data using {ls} -You can use {ls} to migrate data from self-managed {es} or {ess} to {es3}, or to migrate data from one {es3} deployment to another. +You can use {ls} to migrate data from self-managed {{es}} or {ess} to {es3}, or to migrate data from one {es3} deployment to another. -Create a {logstash-ref}/configuration.html[{ls} pipeline] that includes the {es} {logstash-ref}/plugins-inputs-elasticsearch.html[input plugin] and {logstash-ref}/plugins-outputs-elasticsearch.html[output plugin]. +Create a {logstash-ref}/configuration.html[{ls} pipeline] that includes the {{es}} {logstash-ref}/plugins-inputs-elasticsearch.html[input plugin] and {logstash-ref}/plugins-outputs-elasticsearch.html[output plugin]. -Configure the {es} input to point to your source deployment or instance, and configure the {es} output with the `cloud_id` and `api_key` settings for your target {es3} instance. +Configure the {{es}} input to point to your source deployment or instance, and configure the {{es}} output with the `cloud_id` and `api_key` settings for your target {es3} instance. If your origin index is using <>, then you might need to adjust your index settings. diff --git a/serverless/pages/ingest-your-data-upload-file.asciidoc b/serverless/pages/ingest-your-data-upload-file.asciidoc index 4c4bfeb24f..d0b432085c 100644 --- a/serverless/pages/ingest-your-data-upload-file.asciidoc +++ b/serverless/pages/ingest-your-data-upload-file.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-ingest-data-file-upload]] = Upload a file -// :description: Add data to {es} using the File Uploader. +// :description: Add data to {{es}} using the File Uploader. // :keywords: serverless, elasticsearch, ingest, how to -You can upload files to {es} using the File Uploader. +You can upload files to {{es}} using the File Uploader. Use the visualizer to inspect the data before importing it. 
You can upload different file formats for analysis: @@ -28,7 +28,7 @@ File formats supported up to 60 MB: [[elasticsearch-ingest-data-file-upload-how-to-upload-a-file]] == How to upload a file -You'll find a link to the Data Visualizer on the {es} **Getting Started** page. +You'll find a link to the Data Visualizer on the {{es}} **Getting Started** page. [role="screenshot"] image::images/file-data-visualizer-homepage-link.png[data visualizer link] diff --git a/serverless/pages/ingest-your-data.asciidoc b/serverless/pages/ingest-your-data.asciidoc index bd1fe042fd..12d4d793ae 100644 --- a/serverless/pages/ingest-your-data.asciidoc +++ b/serverless/pages/ingest-your-data.asciidoc @@ -10,9 +10,9 @@ The best ingest option(s) for your use case depends on whether you are indexing [[es-ingestion-overview-apis]] == Ingest data using APIs -You can use the <> to add data to your {es} indices, using any HTTP client, including the <>. +You can use the <> to add data to your {{es}} indices, using any HTTP client, including the <>. -While the {es} APIs can be used for any data type, Elastic provides specialized tools that optimize ingestion for specific use cases. +While the {{es}} APIs can be used for any data type, Elastic provides specialized tools that optimize ingestion for specific use cases. [discrete] [[es-ingestion-overview-general-content]] @@ -21,7 +21,7 @@ While the {es} APIs can be used for any data type, Elastic provides specialized General content is typically text-heavy data that does not have a timestamp. This could be data like knowledge bases, website content, product catalogs, and more. -You can use these specialized tools to add general content to {es} indices: +You can use these specialized tools to add general content to {{es}} indices: * <> * https://github.com/elastic/crawler[Elastic Open Web Crawler] @@ -38,7 +38,7 @@ Time series, or timestamped data, describes data that changes frequently and "fl Time series data refers to any document in standard indices or data streams that includes the `@timestamp` field. ==== -You can use these specialized tools to add timestamped data to {es} data streams: +You can use these specialized tools to add timestamped data to {{es}} data streams: * <> * <> diff --git a/serverless/pages/machine-learning.asciidoc b/serverless/pages/machine-learning.asciidoc index 5dba785e8a..d8b101de08 100644 --- a/serverless/pages/machine-learning.asciidoc +++ b/serverless/pages/machine-learning.asciidoc @@ -1,12 +1,12 @@ [[machine-learning]] = {ml-cap} -// :description: View, export, and import {ml} jobs and models. +// :description: View, export, and import {{ml}} jobs and models. // :keywords: serverless, Elasticsearch, Observability, Security This content applies to: {es-badge} {obs-badge} {sec-badge} -To view your {ml} resources, go to **{project-settings} → {manage-app} → {ml-app}**: +To view your {{ml}} resources, go to **{project-settings} → {manage-app} → {ml-app}**: [role="screenshot"] image::images/ml-security-management.png["Anomaly detection job management"] @@ -25,23 +25,23 @@ For more information, go to {ml-docs}/ml-ad-overview.html[{anomaly-detect-cap}], [[machine-learning-synchronize-saved-objects]] == Synchronize saved objects -Before you can view your {ml} {dfeeds}, jobs, and trained models in {kib}, they must have saved objects. +Before you can view your {{ml}} {{dfeeds}}, jobs, and trained models in {kib}, they must have saved objects. 
For example, if you used APIs to create your jobs, wait for automatic synchronization or go to the **{ml-app}** page and click **Synchronize saved objects**. [discrete] [[machine-learning-export-and-import-jobs]] == Export and import jobs -You can export and import your {ml} job and {dfeed} configuration details on the **{ml-app}** page. +You can export and import your {{ml}} job and {{dfeed}} configuration details on the **{ml-app}** page. For example, you can export jobs from your test environment and import them in your production environment. -The exported file contains configuration details; it does not contain the {ml} models. -For {anomaly-detect}, you must import and run the job to build a model that is accurate for the new environment. -For {dfanalytics}, trained models are portable; you can import the job then transfer the model to the new cluster. -Refer to {ml-docs}/ml-trained-models.html#export-import[Exporting and importing {dfanalytics} trained models]. +The exported file contains configuration details; it does not contain the {{ml}} models. +For {{anomaly-detect}}, you must import and run the job to build a model that is accurate for the new environment. +For {{dfanalytics}}, trained models are portable; you can import the job and then transfer the model to the new cluster. +Refer to {ml-docs}/ml-trained-models.html#export-import[Exporting and importing {{dfanalytics}} trained models]. There are some additional actions that you must take before you can successfully import and run your jobs: -* The {data-sources} that are used by {anomaly-detect} {dfeeds} and {dfanalytics} source indices must exist; otherwise, the import fails. +* The {data-sources} that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. * If your {anomaly-jobs} use custom rules with filter lists, the filter lists must exist; otherwise, the import fails. * If your {anomaly-jobs} were associated with calendars, you must create the calendar in the new environment and add your imported jobs to the calendar. diff --git a/serverless/pages/manage-org.asciidoc b/serverless/pages/manage-org.asciidoc index 48e09cfa5d..bde752b8f1 100644 --- a/serverless/pages/manage-org.asciidoc +++ b/serverless/pages/manage-org.asciidoc @@ -117,7 +117,7 @@ endif::[] |Detections admin |All available detection engine permissions to include creating rule actions, such as notifications to third-party systems. |{sec-badge} -|Endpoint policy manager |Access to endpoint policy management and related artifacts. Can manage {fleet} and integrations. |{sec-badge} +|Endpoint policy manager |Access to endpoint policy management and related artifacts. Can manage {{fleet}} and integrations. |{sec-badge} |=== diff --git a/serverless/pages/manage-your-project-rest-api.asciidoc b/serverless/pages/manage-your-project-rest-api.asciidoc index bc6e9996ad..72d8016530 100644 --- a/serverless/pages/manage-your-project-rest-api.asciidoc +++ b/serverless/pages/manage-your-project-rest-api.asciidoc @@ -87,7 +87,7 @@ curl -H "Authorization: ApiKey $API_KEY" \ <2> You can <>. The response from the create project request will include the created project details, such as the project ID, -the credentials to access the project, and the endpoints to access different apps such as {es} and Kibana. +the credentials to access the project, and the endpoints to access different apps such as {{es}} and Kibana.
Example of `Create project` response: diff --git a/serverless/pages/manage-your-project.asciidoc b/serverless/pages/manage-your-project.asciidoc index 8d39472b22..eed5d53f62 100644 --- a/serverless/pages/manage-your-project.asciidoc +++ b/serverless/pages/manage-your-project.asciidoc @@ -20,7 +20,7 @@ Your project's performance and general data retention are controlled by the **Se //* **Rename your project**. In the **Overview** section, click **Edit** next to the project's name. //* **Manage data and integrations**. Update your project data, including storage settings, indices, and data views, directly in your project. -//* **Manage API keys**. Access your project and interact with its data programmatically using {es} APIs. +//* **Manage API keys**. Access your project and interact with its data programmatically using {{es}} APIs. //* **Manage members**. Add members and manage their access to this project or other resources of your organization. [discrete] diff --git a/serverless/pages/ml-nlp-auto-scale.asciidoc b/serverless/pages/ml-nlp-auto-scale.asciidoc index c16f8e5b23..4de6c2859b 100644 --- a/serverless/pages/ml-nlp-auto-scale.asciidoc +++ b/serverless/pages/ml-nlp-auto-scale.asciidoc @@ -6,7 +6,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} You can enable autoscaling for each of your trained model deployments. -Autoscaling allows {es} to automatically adjust the resources the model deployment can use based on the workload demand. +Autoscaling allows {{es}} to automatically adjust the resources the model deployment can use based on the workload demand. There are two ways to enable autoscaling: @@ -68,7 +68,7 @@ Increasing the number of threads will make the search processes more performant. == Enabling autoscaling in {kib} - adaptive resources You can enable adaptive resources for your models when starting or updating the model deployment. -Adaptive resources make it possible for {es} to scale up or down the available resources based on the load on the process. +Adaptive resources make it possible for {{es}} to scale up or down the available resources based on the load on the process. This can help you to manage performance and cost more easily. When adaptive resources are enabled, the number of VCUs that the model deployment uses is set automatically based on the current load. When the load is high, the number of VCUs that the process can use is automatically increased. diff --git a/serverless/pages/pricing.asciidoc b/serverless/pages/pricing.asciidoc index edd49fb767..14e731096c 100644 --- a/serverless/pages/pricing.asciidoc +++ b/serverless/pages/pricing.asciidoc @@ -1,5 +1,5 @@ [[elasticsearch-billing]] -= {es} billing dimensions += {{es}} billing dimensions // :description: Learn about how Elasticsearch usage affects pricing. // :keywords: serverless, elasticsearch, overview @@ -39,7 +39,7 @@ queries per second (QPS) you require. [discrete] [[elasticsearch-billing-managing-elasticsearch-costs]] -== Managing {es} costs +== Managing {{es}} costs You can control costs using the following strategies: diff --git a/serverless/pages/profile-queries-and-aggregations.asciidoc b/serverless/pages/profile-queries-and-aggregations.asciidoc index e1c2b98acc..d367f71d1b 100644 --- a/serverless/pages/profile-queries-and-aggregations.asciidoc +++ b/serverless/pages/profile-queries-and-aggregations.asciidoc @@ -54,7 +54,7 @@ indices and shards, it doesn't necessarily represent the actual physical query t To see more profiling information, select **View details**. 
You'll find details about query components and the timing breakdown of low-level methods. -For more information, refer to {ref}/search-profile.html#profiling-queries[Profiling queries] in the {es} documentation. +For more information, refer to {ref}/search-profile.html#profiling-queries[Profiling queries] in the {{es}} documentation. [discrete] [[devtools-profile-queries-and-aggregations-filter-for-an-index-or-type]] @@ -168,7 +168,7 @@ image::images/profiler-gs10.png["Drilling into the first shard's details"] + For more information about how the **{searchprofiler}** works, how timings are calculated, and how to interpret various results, refer to -{ref}/search-profile.html#profiling-queries[Profiling queries] in the {es} documentation. +{ref}/search-profile.html#profiling-queries[Profiling queries] in the {{es}} documentation. [discrete] [[profiler-render-JSON]] diff --git a/serverless/pages/project-settings-data.asciidoc b/serverless/pages/project-settings-data.asciidoc index d0906f5b97..42b09504f8 100644 --- a/serverless/pages/project-settings-data.asciidoc +++ b/serverless/pages/project-settings-data.asciidoc @@ -43,10 +43,10 @@ To learn more about roles, refer to <>. | {es-badge}{obs-badge}{sec-badge} | <> -| View, export, and import your {anomaly-detect} and {dfanalytics} jobs and trained models. +| View, export, and import your {{anomaly-detect}} and {{dfanalytics}} jobs and trained models. | {es-badge}{obs-badge}{sec-badge} | <> -| Use transforms to pivot existing {es} indices into summarized or entity-centric indices. +| Use transforms to pivot existing {{es}} indices into summarized or entity-centric indices. | {es-badge}{obs-badge}{sec-badge} |=== diff --git a/serverless/pages/rules.asciidoc b/serverless/pages/rules.asciidoc index c602c830f9..09b662ab76 100644 --- a/serverless/pages/rules.asciidoc +++ b/serverless/pages/rules.asciidoc @@ -30,10 +30,10 @@ The following sections describe each part of the rule in more detail. */ Each project type supports a specific set of rule types. Each _rule type_ provides its own way of defining the conditions to detect, but an expression formed by a series of clauses is a common pattern. -For example, in an {es} query rule, you specify an index, a query, and a threshold, which uses a metric aggregation operation (`count`, `average`, `max`, `min`, or `sum`): +For example, in an {{es}} query rule, you specify an index, a query, and a threshold, which uses a metric aggregation operation (`count`, `average`, `max`, `min`, or `sum`): [role="screenshot"] -image::images/es-query-rule-conditions.png[UI for defining rule conditions in an {es} query rule] +image::images/es-query-rule-conditions.png[UI for defining rule conditions in an {{es}} query rule] // NOTE: This is an autogenerated screenshot. Do not edit it directly. @@ -74,10 +74,10 @@ Refer to <>. After you select a connector, set the _action frequency_. If you want to reduce the number of notifications you receive without affecting their timeliness, some rule types support alert summaries. 
-For example, if you create an {es} query rule, you can set the action frequency such that you receive summaries of the new, ongoing, and recovered alerts on a custom interval: +For example, if you create an {{es}} query rule, you can set the action frequency such that you receive summaries of the new, ongoing, and recovered alerts on a custom interval: [role="screenshot"] -image::images/es-query-rule-action-summary.png[UI for defining rule conditions in an {es} query rule] +image::images/es-query-rule-action-summary.png[UI for defining rule conditions in an {{es}} query rule] // @@ -86,7 +86,7 @@ If the rule type does not support alert summaries, this is your only available o You must choose when the action runs (for example, at each check interval, only when the alert status changes, or at a custom action interval). You must also choose an action group, which affects whether the action runs. Each rule type has a specific set of valid action groups. -For example, you can set _Run when_ to `Query matched` or `Recovered` for the {es} query rule: +For example, you can set _Run when_ to `Query matched` or `Recovered` for the {{es}} query rule: [role="screenshot"] image::images/es-query-rule-recovery-action.png[UI for defining a recovery action] diff --git a/serverless/pages/run-api-requests-in-the-console.asciidoc b/serverless/pages/run-api-requests-in-the-console.asciidoc index b49d4f2934..0e8ba098fd 100644 --- a/serverless/pages/run-api-requests-in-the-console.asciidoc +++ b/serverless/pages/run-api-requests-in-the-console.asciidoc @@ -22,7 +22,7 @@ You can also find Console directly on your {es-serverless} project pages, where == Write requests **Console** understands commands in a cURL-like syntax. -For example, the following is a `GET` request to the {es} `_search` API. +For example, the following is a `GET` request to the {{es}} `_search` API. [source,js] ---- diff --git a/serverless/pages/search-playground.asciidoc b/serverless/pages/search-playground.asciidoc index 7b52449045..2c94f97e5f 100644 --- a/serverless/pages/search-playground.asciidoc +++ b/serverless/pages/search-playground.asciidoc @@ -4,7 +4,7 @@ // :description: Test and edit Elasticsearch queries and chat with your data using LLMs. // :keywords: serverless, elasticsearch, search, playground, GenAI, LLMs -Use the Search Playground to test and edit {es} queries visually in the UI. Then use the Chat Playground to combine your {es} data with large language models (LLMs) for retrieval augmented generation (RAG). +Use the Search Playground to test and edit {{es}} queries visually in the UI. Then use the Chat Playground to combine your {{es}} data with large language models (LLMs) for retrieval augmented generation (RAG). You can also view the underlying Python code that powers the chat interface, and use it in your own application. Find Playground in the {es-serverless} UI under **{es} > Build > Playground**. diff --git a/serverless/pages/search-your-data-the-search-api.asciidoc b/serverless/pages/search-your-data-the-search-api.asciidoc index f0a0f29cd4..ac802baa15 100644 --- a/serverless/pages/search-your-data-the-search-api.asciidoc +++ b/serverless/pages/search-your-data-the-search-api.asciidoc @@ -13,9 +13,9 @@ queries. For example, a search may be limited to a specific index or only return a specific number of results. You can use the https://www.elastic.co/docs/api/doc/elasticsearch-serverless/group/endpoint-search[search API] to search and -aggregate data stored in {es} data streams or indices. 
+aggregate data stored in {{es}} data streams or indices. -For more information, refer to {ref}/search-your-data.html[the search API overview] in the core {es} docs. +For more information, refer to {ref}/search-your-data.html[the search API overview] in the core {{es}} docs. [discrete] [[elasticsearch-search-your-data-the-query-dsl]] @@ -33,5 +33,5 @@ Query DSL. Retrievers are an alternative to Query DSL that allow you to configure complex retrieval pipelines using a simplified syntax. Retrievers simplify the user experience by allowing entire retrieval pipelines to be configured in a single `_search` API call. -Learn more in the {ref}/retrievers-overview.html[Retrievers overview] in the core {es} docs. +Learn more in the {ref}/retrievers-overview.html[Retrievers overview] in the core {{es}} docs. diff --git a/serverless/pages/search-your-data.asciidoc b/serverless/pages/search-your-data.asciidoc index 194e88c41c..368e3a3c55 100644 --- a/serverless/pages/search-your-data.asciidoc +++ b/serverless/pages/search-your-data.asciidoc @@ -5,7 +5,7 @@ // :keywords: serverless, elasticsearch, search Searching your data in {es-serverless} works the same way as in other Elasticsearch deployments. -If you haven't used {es} before, you can learn the basics in the {ref}/elasticsearch-intro.html[core {es} documentation]. +If you haven't used {{es}} before, you can learn the basics in the {ref}/elasticsearch-intro.html[core {{es}} documentation]. You can use the https://www.elastic.co/docs/api/doc/elasticsearch-serverless[{es-serverless} REST APIs] to search your data using any HTTP client, including the <>, or directly in <>. @@ -13,14 +13,14 @@ You can also run searches using {kibana-ref}/discover.html[Discover] in your pro [TIP] ==== -Try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {es} documentation to get started, or check out our https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebooks]. +Try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {{es}} documentation to get started, or check out our https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebooks]. ==== [discrete] [[elasticsearch-search-your-data-query-languages-overview]] == Query languages -Learn about the various query languages you can use to search your data in the {ref}/search-analyze.html[core {es} documentation]. +Learn about the various query languages you can use to search your data in the {ref}/search-analyze.html[core {{es}} documentation]. [discrete] [[elasticsearch-search-your-data-learn-more]] diff --git a/serverless/pages/serverless-differences.asciidoc b/serverless/pages/serverless-differences.asciidoc index c20bd36fdc..cc8a819985 100644 --- a/serverless/pages/serverless-differences.asciidoc +++ b/serverless/pages/serverless-differences.asciidoc @@ -1,5 +1,5 @@ [[elasticsearch-differences]] -= Differences from other {es} offerings += Differences from other {{es}} offerings ++++ Serverless differences ++++ @@ -7,11 +7,11 @@ // :description: Understand how {es-serverless} differs from Elastic Cloud Hosted and self-managed offerings. // :keywords: serverless, elasticsearch -<> handles all the infrastructure management for you, providing a fully managed {es} service. +<> handles all the infrastructure management for you, providing a fully managed {{es}} service. 
-If you've used {es} before, you'll notice some differences in how you work with the service on {serverless-full}, because a number of APIs and settings are not required for serverless projects. +If you've used {{es}} before, you'll notice some differences in how you work with the service on {serverless-full}, because a number of APIs and settings are not required for serverless projects. -This guide helps you understand what's different, what's available, and how to work effectively when running {es} on {serverless-full}. +This guide helps you understand what's different, what's available, and how to work effectively when running {{es}} on {serverless-full}. [discrete] [[elasticsearch-differences-serverless-infrastructure-management]] @@ -24,7 +24,7 @@ This guide helps you understand what's different, what's available, and how to w * Shard distribution and replication * Resource utilization and monitoring -This fully managed approach means many traditional {es} infrastructure APIs and settings are not available to end users, as detailed in the following sections. +This fully managed approach means many traditional {{es}} infrastructure APIs and settings are not available to end users, as detailed in the following sections. [discrete] [[elasticsearch-differences-serverless-index-size]] @@ -50,7 +50,7 @@ To ensure optimal performance, follow these recommendations for sizing individua For large datasets that exceed the recommended maximum size for a single index, consider splitting your data across smaller indices and using an alias to search them collectively. -These recommendations do not apply to indices using better binary quantization (BBQ). Refer to {ref}/dense-vector.html#dense-vector-quantization[vector quantization] in the core {es} docs for more information. +These recommendations do not apply to indices using better binary quantization (BBQ). Refer to {ref}/dense-vector.html#dense-vector-quantization[vector quantization] in the core {{es}} docs for more information. [discrete] [[elasticsearch-differences-serverless-apis-availability]] @@ -105,7 +105,7 @@ In {es-serverless}, you can only configure {ref}/index-modules.html#index-module Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic. Available settings:: -*Index-level settings*: Settings that control how {es} documents are processed, stored, and searched are available to end users. These include: +*Index-level settings*: Settings that control how {{es}} documents are processed, stored, and searched are available to end users. These include: * Analysis configuration * Mapping parameters * Search/query settings diff --git a/serverless/pages/sign-up.asciidoc b/serverless/pages/sign-up.asciidoc index d4dfcad875..f2852cd0c3 100644 --- a/serverless/pages/sign-up.asciidoc +++ b/serverless/pages/sign-up.asciidoc @@ -44,7 +44,7 @@ During the free 14 day trial, Elastic provides access to one hosted deployment a * You can have one active deployment at a time * The deployment size is limited to 8GB RAM and approximately 360GB of storage, depending on the specified hardware profile * Machine learning nodes are available up to 4GB RAM -* Custom {es} plugins are not enabled +* Custom {{es}} plugins are not enabled To learn more about Elastic Cloud Hosted, check our https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elasticsearch Service documentation]. 
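Returning to the index-level settings mentioned earlier: analysis configuration and mapping parameters remain under your control on serverless, even though cluster- and node-level settings do not. A minimal sketch with the Python client, using a hypothetical index and custom analyzer:

[source,python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://my-project.es.example.com",  # placeholder endpoint
    api_key="...",                        # placeholder API key
)

# Index-level analysis and mapping configuration -- the kind of settings
# that stay available on serverless, unlike cluster or node settings.
client.indices.create(
    index="my-index",  # hypothetical index name
    settings={
        "analysis": {
            "analyzer": {
                "lowercase_keyword": {  # hypothetical custom analyzer
                    "type": "custom",
                    "tokenizer": "keyword",
                    "filter": ["lowercase"],
                }
            }
        }
    },
    mappings={
        "properties": {
            "title": {"type": "text", "analyzer": "lowercase_keyword"}
        }
    },
)
----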
@@ -59,7 +59,7 @@ To learn more about Elastic Cloud Hosted, check our https://www.elastic.co/guide Subscribe to https://www.elastic.co/guide/en/cloud/current/ec-billing-details.html[Elastic Cloud] for the following benefits: -* Increased memory or storage for deployment components, such as {es} clusters, machine learning nodes, and APM server. +* Increased memory or storage for deployment components, such as {{es}} clusters, machine learning nodes, and APM server. * As many deployments and projects as you need. * Third availability zone for your deployments. * Access to additional features, such as cross-cluster search and cross-cluster replication. diff --git a/serverless/pages/transforms.asciidoc b/serverless/pages/transforms.asciidoc index c561356f10..e6505a8822 100644 --- a/serverless/pages/transforms.asciidoc +++ b/serverless/pages/transforms.asciidoc @@ -6,7 +6,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} -{transforms-cap} enable you to convert existing {es} indices into summarized +{transforms-cap} enable you to convert existing {{es}} indices into summarized indices, which provide opportunities for new insights and analytics. For example, you can use {transforms} to pivot your data into entity-centric diff --git a/serverless/pages/welcome-to-serverless.asciidoc b/serverless/pages/welcome-to-serverless.asciidoc index 5cce461ebf..c192c5cb59 100644 --- a/serverless/pages/welcome-to-serverless.asciidoc +++ b/serverless/pages/welcome-to-serverless.asciidoc @@ -1,8 +1,8 @@ -{serverless-full} is a fully managed solution that allows you to deploy and use Elastic for your use cases without managing the underlying infrastructure. It represents a shift in how you interact with {es} - instead of managing clusters, nodes, data tiers, and scaling, you create **serverless projects** that are fully managed and automatically scaled by Elastic. This abstraction of infrastructure decisions allows you to focus solely on gaining value and insight from your data. +{serverless-full} is a fully managed solution that allows you to deploy and use Elastic for your use cases without managing the underlying infrastructure. It represents a shift in how you interact with {{es}} - instead of managing clusters, nodes, data tiers, and scaling, you create **serverless projects** that are fully managed and automatically scaled by Elastic. This abstraction of infrastructure decisions allows you to focus solely on gaining value and insight from your data. -{serverless-full} automatically provisions, manages, and scales your {es} resources based on your actual usage. Unlike traditional deployments where you need to predict and provision resources in advance, serverless adapts to your workload in real-time, ensuring optimal performance while eliminating the need for manual capacity planning. +{serverless-full} automatically provisions, manages, and scales your {{es}} resources based on your actual usage. Unlike traditional deployments where you need to predict and provision resources in advance, serverless adapts to your workload in real-time, ensuring optimal performance while eliminating the need for manual capacity planning. -Serverless projects use the core components of the {stack}, such as {es} and {kib}, and are based on an architecture that +Serverless projects use the core components of the {stack}, such as {{es}} and {kib}, and are based on an architecture that decouples compute and storage. 
Search and indexing operations are separated, which offers high flexibility for scaling your workloads while ensuring a high level of performance. diff --git a/serverless/pages/what-is-elasticsearch-serverless.asciidoc b/serverless/pages/what-is-elasticsearch-serverless.asciidoc index 875015e4fb..0c09539847 100644 --- a/serverless/pages/what-is-elasticsearch-serverless.asciidoc +++ b/serverless/pages/what-is-elasticsearch-serverless.asciidoc @@ -9,14 +9,14 @@ .Understanding Elasticsearch on serverless [IMPORTANT] ==== -If you haven't used {es} before, first learn the basics in the https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html[core {es} documentation]. +If you haven't used {{es}} before, first learn the basics in the https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html[core {{es}} documentation]. ==== {es-serverless} is one of the three available project types on <>. This project type enables you to use the core functionality of {es}: searching, indexing, storing, and analyzing data of all shapes and sizes. -When using {es} on {serverless-full} you don’t need to worry about managing the infrastructure that keeps {es} distributed and available: nodes, shards, and replicas. These resources are completely automated on the serverless platform, which is designed to scale up and down with your workload. +When using {{es}} on {serverless-full} you don’t need to worry about managing the infrastructure that keeps {{es}} distributed and available: nodes, shards, and replicas. These resources are completely automated on the serverless platform, which is designed to scale up and down with your workload. This automation allows you to focus on building your search applications and solutions. @@ -28,19 +28,19 @@ This automation allows you to focus on building your search applications and sol |=== | 🚀 a| [.card-title]#<># + -Get started by creating your first {es} project on serverless. +Get started by creating your first {{es}} project on serverless. | 🔌 -a| [.card-title]#<># + +a| [.card-title]#<># + Learn how to connect your applications to your {es-serverless} endpoint. | ⤵️ a| [.card-title]#<># + -Learn how to get your data into {es} and start building your search application. +Learn how to get your data into {{es}} and start building your search application. | 🛝 a| [.card-title]#{kibana-ref}/playground.html[*Try Playground →*]# + -After you've added some data, use Playground to test out queries and combine {es} with the power of Generative AI in your applications. +After you've added some data, use Playground to test out queries and combine {{es}} with the power of Generative AI in your applications. |=== [discrete] @@ -51,9 +51,9 @@ After you've added some data, use Playground to test out queries and combine {es |=== | ❓ a| [.card-title]#<># + -Understand the differences between {es} on {serverless-full} and other deployment types. +Understand the differences between {{es}} on {serverless-full} and other deployment types. | 🧾 a| [.card-title]#<># + -Learn about the billing model for {es} on {serverless-full}. +Learn about the billing model for {{es}} on {serverless-full}. 
|=== diff --git a/serverless/partials/field-mappings-dense-vector.asciidoc b/serverless/partials/field-mappings-dense-vector.asciidoc index 3c1c7e5635..42bded676b 100644 --- a/serverless/partials/field-mappings-dense-vector.asciidoc +++ b/serverless/partials/field-mappings-dense-vector.asciidoc @@ -1,4 +1,4 @@ -The models compatible with {es} NLP generate dense vectors as output. The +The models compatible with {{es}} NLP generate dense vectors as output. The {ref}/dense-vector.html[`dense_vector`] field type is suitable for storing dense vectors of numeric values. The index must have a field with the `dense_vector` field type to index the embeddings that the supported third-party model that you diff --git a/serverless/partials/field-mappings-elser.asciidoc b/serverless/partials/field-mappings-elser.asciidoc index e633b80b7c..de47021333 100644 --- a/serverless/partials/field-mappings-elser.asciidoc +++ b/serverless/partials/field-mappings-elser.asciidoc @@ -1,5 +1,5 @@ ELSER produces token-weight pairs as output from the input text and the query. -The {es} {ref}/sparse-vector.html[`sparse_vector`] field type can store these +The {{es}} {ref}/sparse-vector.html[`sparse_vector`] field type can store these token-weight pairs as numeric feature vectors. The index must have a field with the `sparse_vector` field type to index the tokens that ELSER generates. diff --git a/solutions/observability/apps/installation-layout.md b/solutions/observability/apps/installation-layout.md index 12c3e6c46b..d35de60f55 100644 --- a/solutions/observability/apps/installation-layout.md +++ b/solutions/observability/apps/installation-layout.md @@ -22,7 +22,7 @@ View the installation layout and default paths for both Fleet-managed APM Server : Main {{agent}} configuration `/Library/Elastic/Agent/fleet.enc` -: Main {{agent}} {fleet} encrypted configuration +: Main {{agent}} {{fleet}} encrypted configuration `/Library/Elastic/Agent/data/elastic-agent-*/logs/elastic-agent.ndjson` : Log files for {{agent}} and {{beats}} shippers [1] @@ -41,7 +41,7 @@ You can install {{agent}} in a custom base path other than `/Library`. When ins : Main {{agent}} configuration `/opt/Elastic/Agent/fleet.enc` -: Main {{agent}} {fleet} encrypted configuration +: Main {{agent}} {{fleet}} encrypted configuration `/opt/Elastic/Agent/data/elastic-agent-*/logs/elastic-agent.ndjson` : Log files for {{agent}} and {{beats}} shippers [1] @@ -60,7 +60,7 @@ You can install {{agent}} in a custom base path other than `/opt`. When install : Main {{agent}} configuration `C:\Program Files\Elastic\Agent\fleet.enc` -: Main {{agent}} {fleet} encrypted configuration +: Main {{agent}} {{fleet}} encrypted configuration `C:\Program Files\Elastic\Agent\data\elastic-agent-*\logs\elastic-agent.ndjson` : Log files for {{agent}} and {{beats}} shippers [1] @@ -76,7 +76,7 @@ You can install {{agent}} in a custom base path other than `C:\Program Files`. : Main {{agent}} configuration `/etc/elastic-agent/fleet.enc` -: Main {{agent}} {fleet} encrypted configuration +: Main {{agent}} {{fleet}} encrypted configuration `/var/lib/elastic-agent/data/elastic-agent-*/logs/elastic-agent.ndjson` : Log files for {{agent}} and {{beats}} shippers [1] @@ -93,7 +93,7 @@ You can install {{agent}} in a custom base path other than `C:\Program Files`. 
: Main {{agent}} configuration `/etc/elastic-agent/fleet.enc` -: Main {{agent}} {fleet} encrypted configuration +: Main {{agent}} {{fleet}} encrypted configuration `/var/lib/elastic-agent/data/elastic-agent-*/logs/elastic-agent.ndjson` : Log files for {{agent}} and {{beats}} shippers [1] diff --git a/solutions/observability/apps/monitor-apm-server.md b/solutions/observability/apps/monitor-apm-server.md index cbe40aa573..eb0efb648e 100644 --- a/solutions/observability/apps/monitor-apm-server.md +++ b/solutions/observability/apps/monitor-apm-server.md @@ -5,7 +5,7 @@ mapped_pages: # Monitor APM Server [apm-monitor-apm] -Use the {{stack}} {monitor-features} to gain insight into the real-time health and performance of APM Server. Stack monitoring exposes key metrics, like intake response count, intake error rate, output event rate, output failed event rate, and more. +Use the {{stack}} {{monitor-features}} to gain insight into the real-time health and performance of APM Server. Stack monitoring exposes key metrics, like intake response count, intake error rate, output event rate, output failed event rate, and more. Select your deployment method to get started: diff --git a/solutions/observability/apps/monitor-fleet-managed-apm-server.md b/solutions/observability/apps/monitor-fleet-managed-apm-server.md index fc11df528f..6a06f2832f 100644 --- a/solutions/observability/apps/monitor-fleet-managed-apm-server.md +++ b/solutions/observability/apps/monitor-fleet-managed-apm-server.md @@ -154,11 +154,11 @@ See the [{{agent}} command reference](https://www.elastic.co/guide/en/fleet/curr If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`. ::::{important} - The {{es}} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one ingest node. + The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one ingest node. :::: - If the {{es}} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: + If the {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: 1. Create a user on the monitoring cluster that has the `remote_monitoring_agent` [built-in role](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, if it’s available in your environment, use the `remote_monitoring_user` [built-in user](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file. diff --git a/solutions/observability/apps/use-metricbeat-to-send-monitoring-data.md b/solutions/observability/apps/use-metricbeat-to-send-monitoring-data.md index d38a21ace6..bbf571856c 100644 --- a/solutions/observability/apps/use-metricbeat-to-send-monitoring-data.md +++ b/solutions/observability/apps/use-metricbeat-to-send-monitoring-data.md @@ -144,11 +144,11 @@ To collect and ship monitoring data: If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`. 
::::{important} - The {{es}} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one ingest node. + The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one ingest node. :::: - If the {{es}} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: + If the {{es}} {{security-features}} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {{metricbeat}} can send metrics successfully: 1. Create a user on the monitoring cluster that has the `remote_monitoring_agent` [built-in role](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md). Alternatively, if it’s available in your environment, use the `remote_monitoring_user` [built-in user](../../../deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). diff --git a/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md b/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md index ce7274c138..305659f02f 100644 --- a/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md +++ b/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md @@ -61,7 +61,7 @@ Microsoft Azure allows you to find, deploy, and manage {{es}} from within the Az 1. To single sign-on directly into Elastic, select your Azure account. 2. To see if there is any available data, click **Observability**. There should be no data yet, but next, you will ingest logs. - ![{{kib}} {observability} page (no data)](../../../images/observability-monitor-azure-kibana-observability-page-empty.png "") + ![{{kib}} {{observability}} page (no data)](../../../images/observability-monitor-azure-kibana-observability-page-empty.png "") diff --git a/solutions/observability/infra-and-hosts/tutorial-observe-nginx-instances.md b/solutions/observability/infra-and-hosts/tutorial-observe-nginx-instances.md index 541c0b6bcc..6377598868 100644 --- a/solutions/observability/infra-and-hosts/tutorial-observe-nginx-instances.md +++ b/solutions/observability/infra-and-hosts/tutorial-observe-nginx-instances.md @@ -286,7 +286,7 @@ These anomaly detection jobs are available when you have data that matches the q ### Before you begin [monitor-nginx-ml-prereqs] -Verify that your environment is set up properly to use the {{ml-features}}. If {{es}} {security-features} are enabled, you need a user with permissions to manage {{anomaly-jobs}}. Refer to [Set up ML features](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). +Verify that your environment is set up properly to use the {{ml-features}}. If {{es}} {{security-features}} are enabled, you need a user with permissions to manage {{anomaly-jobs}}. Refer to [Set up ML features](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). ### Add nginx ML jobs [monitor-nginx-ml-add-jobs] diff --git a/solutions/observability/logs/categorize-log-entries.md b/solutions/observability/logs/categorize-log-entries.md index 4e2e3c8844..343cd2539f 100644 --- a/solutions/observability/logs/categorize-log-entries.md +++ b/solutions/observability/logs/categorize-log-entries.md @@ -10,7 +10,7 @@ Application log events are often unstructured and contain variable data. Many lo The **Categories** page enables you to identify patterns in your log events quickly. 
Instead of manually identifying similar logs, the logs categorization view lists log events that have been grouped based on their messages and formats so that you can take action quicker. ::::{note} -This feature makes use of {{ml}} {anomaly-jobs}. To set up jobs, you must have `all` {{kib}} feature privileges for **{{ml-app}}**. Users that have full or read-only access to {{ml-features}} within a {{kib}} space can view the results of *all* {{anomaly-jobs}} that are visible in that space, even if they do not have access to the source indices of those jobs. You must carefully consider who is given access to {{ml-features}}; {{anomaly-job}} results may propagate field values that contain sensitive information from the source indices to the results. For more details, refer to [Set up {{ml-features}}](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). +This feature makes use of {{ml}} {{anomaly-jobs}}. To set up jobs, you must have `all` {{kib}} feature privileges for **{{ml-app}}**. Users that have full or read-only access to {{ml-features}} within a {{kib}} space can view the results of *all* {{anomaly-jobs}} that are visible in that space, even if they do not have access to the source indices of those jobs. You must carefully consider who is given access to {{ml-features}}; {{anomaly-job}} results may propagate field values that contain sensitive information from the source indices to the results. For more details, refer to [Set up {{ml-features}}](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). :::: diff --git a/solutions/observability/logs/inspect-log-anomalies.md b/solutions/observability/logs/inspect-log-anomalies.md index 4344d0aaf8..7c1f7e22aa 100644 --- a/solutions/observability/logs/inspect-log-anomalies.md +++ b/solutions/observability/logs/inspect-log-anomalies.md @@ -15,7 +15,7 @@ When the {{anomaly-detect}} features of {{ml}} are enabled, you can use the **Lo You can also view log anomalies directly in the [{{ml-app}} app](../../../explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-anomalies.md). ::::{note} -This feature makes use of {{ml}} {anomaly-jobs}. To set up jobs, you must have `all` {{kib}} feature privileges for **{{ml-app}}**. Users that have full or read-only access to {{ml-features}} within a {{kib}} space can view the results of *all* {{anomaly-jobs}} that are visible in that space, even if they do not have access to the source indices of those jobs. You must carefully consider who is given access to {{ml-features}}; {{anomaly-job}} results may propagate field values that contain sensitive information from the source indices to the results. For more details, refer to [Set up {{ml-features}}](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). +This feature makes use of {{ml}} {{anomaly-jobs}}. To set up jobs, you must have `all` {{kib}} feature privileges for **{{ml-app}}**. Users that have full or read-only access to {{ml-features}} within a {{kib}} space can view the results of *all* {{anomaly-jobs}} that are visible in that space, even if they do not have access to the source indices of those jobs. You must carefully consider who is given access to {{ml-features}}; {{anomaly-job}} results may propagate field values that contain sensitive information from the source indices to the results. For more details, refer to [Set up {{ml-features}}](../../../explore-analyze/machine-learning/setting-up-machine-learning.md). 
:::: diff --git a/troubleshoot/ingest/fleet/common-problems.md b/troubleshoot/ingest/fleet/common-problems.md index 2870141a9b..5d5bcba810 100644 --- a/troubleshoot/ingest/fleet/common-problems.md +++ b/troubleshoot/ingest/fleet/common-problems.md @@ -64,7 +64,7 @@ Find troubleshooting information for {{fleet}}, {{fleet-server}}, and {{agent}} In {{fleet}}, if you delete an {{agent}} policy that is associated with one or more inactive enrolled agents, when the agent returns back to a `Healthy` or `Offline` state, it cannot be unenrolled. Attempting to unenroll the agent results in an `Error unenrolling agent` message, and the unenrollment fails. -To resolve this problem, you can use the [{{kib}} {fleet} APIs](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html) to force unenroll the agent. +To resolve this problem, you can use the [{{kib}} {{fleet}} APIs](https://www.elastic.co/guide/en/fleet/current/fleet-api-docs.html) to force unenroll the agent. To uninstall a single {{agent}}: @@ -152,7 +152,7 @@ If you are unable to see {{fleet-server}} in {{kib}}, make sure it’s set up. To set up {{fleet-server}} on {{ecloud}}: 1. Go to your deployment on {{ecloud}}. -2. Follow the {{ecloud}} prompts to set up **{{integrations-server}}**. Once complete, the {{fleet-server}} {agent} will show up in {{fleet}}. +2. Follow the {{ecloud}} prompts to set up **{{integrations-server}}**. Once complete, the {{fleet-server}} {{agent}} will show up in {{fleet}}. To enable {{fleet}} and set up {{fleet-server}} on a self-managed cluster: @@ -261,7 +261,7 @@ Here are several steps to help you troubleshoot the problem. curl -f http://:8220/api/status ``` -3. Verify that you have specified the correct {{kib}} {fleet} settings URL and port for your environment. +3. Verify that you have specified the correct {{kib}} {{fleet}} settings URL and port for your environment. By default, HTTPS protocol and port 8220 is expected by {{fleet-server}} to communicate with {{es}} unless you have explicitly set it otherwise. @@ -539,7 +539,7 @@ Error: fail to enroll: fail to execute request to fleet-server: x509: certificat Error: enroll command failed with exit code: 1 ``` -To install or enroll against a self-signed cert {{fleet-server}} {agent}, add in the `--insecure` option to the command: +To install or enroll against a self-signed cert {{fleet-server}} {{agent}}, add in the `--insecure` option to the command: ```sh sudo ./elastic-agent install --url=https://:8220 --enrollment-token= --insecure @@ -657,9 +657,9 @@ To recover the {{agent}}: 1. In {{fleet}}, open the **Agents** tab and click **Add agent**. 2. In the **Add agent** flyout, select an agent policy that contains the **Fleet Server** integration. On Elastic Cloud you can use the **Elastic Cloud agent policy** which includes the integration. 3. Follow the instructions in the flyout, and stop before running the CLI commands. -4. Depending on the state of the original {{fleet-server}} {agent}, do one of the following: +4. 
Depending on the state of the original {{fleet-server}} {{agent}}, do one of the following: - * **The original {{fleet-server}} {agent} is still running and healthy** + * **The original {{fleet-server}} {{agent}} is still running and healthy** In this case, you only need to re-enroll the agent with {{fleet}}: @@ -679,7 +679,7 @@ To recover the {{agent}}: sudo ./elastic-agent enroll --url=https://fleet-server:8220 --enrollment-token=bXktc3VwZXItc2VjcmV0LWVucm9sbWVudC10b2tlbg== ``` - * **The original {{fleet-server}} {agent} is no longer installed** + * **The original {{fleet-server}} {{agent}} is no longer installed** In this case, you need to install the agent again: diff --git a/troubleshoot/kibana/monitoring.md b/troubleshoot/kibana/monitoring.md index 2be406ecda..c4c29e30f6 100644 --- a/troubleshoot/kibana/monitoring.md +++ b/troubleshoot/kibana/monitoring.md @@ -9,7 +9,7 @@ mapped_pages: # Monitoring [monitor-troubleshooting] -Use the information in this section to troubleshoot common problems and find answers for frequently asked questions related to the {{kib}} {monitor-features}. +Use the information in this section to troubleshoot common problems and find answers for frequently asked questions related to the {{kib}} {{monitor-features}}. ## Cannot view the cluster because the license information is invalid [_cannot_view_the_cluster_because_the_license_information_is_invalid] From 0c647f7b14aac0216faee5e47f94d66064c62c50 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Feb 2025 14:35:28 -0500 Subject: [PATCH 2/6] unedit serverless --- serverless/pages/action-connectors.asciidoc | 2 +- serverless/pages/api-keys.asciidoc | 4 +- .../apis-elasticsearch-conventions.asciidoc | 24 +++++----- serverless/pages/apis-http-apis.asciidoc | 2 +- .../clients-dot-net-getting-started.asciidoc | 4 +- .../pages/clients-go-getting-started.asciidoc | 6 +-- .../clients-java-getting-started.asciidoc | 8 ++-- .../clients-nodejs-getting-started.asciidoc | 2 +- .../clients-php-getting-started.asciidoc | 4 +- .../clients-python-getting-started.asciidoc | 4 +- .../clients-ruby-getting-started.asciidoc | 6 +-- serverless/pages/clients.asciidoc | 2 +- serverless/pages/cloud-regions.asciidoc | 2 +- .../pages/connecting-to-es-endpoint.asciidoc | 8 ++-- serverless/pages/custom-roles.asciidoc | 14 +++--- serverless/pages/data-views.asciidoc | 44 +++++++++---------- .../pages/debug-grok-expressions.asciidoc | 8 ++-- .../developer-tools-troubleshooting.asciidoc | 8 ++-- .../elasticsearch-developer-tools.asciidoc | 4 +- ...ore-your-data-ml-nlp-deploy-model.asciidoc | 6 +-- .../explore-your-data-ml-nlp-elser.asciidoc | 4 +- ...ore-your-data-ml-nlp-import-model.asciidoc | 6 +-- ...xplore-your-data-ml-nlp-model-ref.asciidoc | 6 +-- ...lore-your-data-ml-nlp-ner-example.asciidoc | 2 +- ...e-your-data-ml-nlp-search-compare.asciidoc | 2 +- ...e-your-data-ml-nlp-test-inference.asciidoc | 4 +- ...ata-ml-nlp-text-embedding-example.asciidoc | 2 +- .../pages/explore-your-data-ml-nlp.asciidoc | 6 +-- serverless/pages/explore-your-data.asciidoc | 4 +- serverless/pages/get-started.asciidoc | 8 ++-- serverless/pages/index-management.asciidoc | 2 +- serverless/pages/ingest-pipelines.asciidoc | 4 +- ...your-data-ingest-data-through-api.asciidoc | 4 +- ...t-data-through-integrations-beats.asciidoc | 2 +- ...ugh-integrations-connector-client.asciidoc | 12 ++--- ...ata-through-integrations-logstash.asciidoc | 16 +++---- .../ingest-your-data-upload-file.asciidoc | 6 +-- serverless/pages/ingest-your-data.asciidoc | 8 ++-- 
serverless/pages/machine-learning.asciidoc | 18 ++++---- serverless/pages/manage-org.asciidoc | 2 +- .../manage-your-project-rest-api.asciidoc | 2 +- serverless/pages/manage-your-project.asciidoc | 2 +- serverless/pages/ml-nlp-auto-scale.asciidoc | 4 +- serverless/pages/pricing.asciidoc | 4 +- .../profile-queries-and-aggregations.asciidoc | 4 +- .../pages/project-settings-data.asciidoc | 4 +- serverless/pages/rules.asciidoc | 10 ++--- .../run-api-requests-in-the-console.asciidoc | 2 +- serverless/pages/search-playground.asciidoc | 2 +- .../search-your-data-the-search-api.asciidoc | 6 +-- serverless/pages/search-your-data.asciidoc | 6 +-- .../pages/serverless-differences.asciidoc | 14 +++--- serverless/pages/sign-up.asciidoc | 4 +- serverless/pages/transforms.asciidoc | 2 +- .../pages/welcome-to-serverless.asciidoc | 6 +-- .../what-is-elasticsearch-serverless.asciidoc | 16 +++---- 56 files changed, 184 insertions(+), 184 deletions(-) diff --git a/serverless/pages/action-connectors.asciidoc b/serverless/pages/action-connectors.asciidoc index 9b699bfa98..8d523714b1 100644 --- a/serverless/pages/action-connectors.asciidoc +++ b/serverless/pages/action-connectors.asciidoc @@ -47,7 +47,7 @@ Actions are instantiations of a connector that are linked to rules and run as ba //// //// -/* {kib} provides the following types of connectors for use with {{alert-features}} : +/* {kib} provides the following types of connectors for use with {alert-features} : - [D3 Security]({kibana-ref}/d3security-action-type.html) - [Email]({kibana-ref}/email-action-type.html) diff --git a/serverless/pages/api-keys.asciidoc b/serverless/pages/api-keys.asciidoc index f29ca6f3dc..d3a1519db0 100644 --- a/serverless/pages/api-keys.asciidoc +++ b/serverless/pages/api-keys.asciidoc @@ -9,7 +9,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} API keys are security mechanisms used to authenticate and authorize access to {stack} resources, and ensure that only authorized users or applications are able to interact with the {stack}. -For example, if you extract data from an {{es}} cluster on a daily basis, you might create an API key tied to your credentials, configure it with minimum access, and then put the API credentials into a cron job. +For example, if you extract data from an {es} cluster on a daily basis, you might create an API key tied to your credentials, configure it with minimum access, and then put the API credentials into a cron job. Or, you might create API keys to automate ingestion of new data from remote sources, without a live user interaction. You can manage your keys in **{project-settings} → {manage-app} → {api-keys-app}**: @@ -44,7 +44,7 @@ In **{api-keys-app}**, click **Create API key**: [role="screenshot"] image::images/create-personal-api-key.png["Create API key UI"] -Once created, you can copy the encoded API key and use it to send requests to the {{es}} HTTP API. For example: +Once created, you can copy the encoded API key and use it to send requests to the {es} HTTP API. For example: [source,bash] ---- diff --git a/serverless/pages/apis-elasticsearch-conventions.asciidoc b/serverless/pages/apis-elasticsearch-conventions.asciidoc index 7d733b77a9..4681854faf 100644 --- a/serverless/pages/apis-elasticsearch-conventions.asciidoc +++ b/serverless/pages/apis-elasticsearch-conventions.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-api-conventions]] -= {{es}} API conventions += {es} API conventions // :description: The {es-serverless} REST APIs have conventions for headers and request bodies. 
// :keywords: serverless, elasticsearch, API, reference -You can run {{es}} API requests in **{dev-tools-app} → Console**. +You can run {es} API requests in **{dev-tools-app} → Console**. For example: [source,shell] @@ -18,8 +18,8 @@ Check out <>. [[elasticsearch-api-conventions-request-headers]] == Request headers -When you call {{es}} APIs outside of the Console, you must provide a request header. -The {{es}} APIs support the `Authorization`, `Content-Type`, and `X-Opaque-Id` headers. +When you call {es} APIs outside of the Console, you must provide a request header. +The {es} APIs support the `Authorization`, `Content-Type`, and `X-Opaque-Id` headers. [discrete] [[elasticsearch-api-conventions-authorization]] @@ -35,7 +35,7 @@ curl -X GET "${ES_URL}/_cat/indices?v=true" \ -H "Authorization: ApiKey ${API_KEY}" ---- -To get API keys for the {{es}} endpoint (`${ES_URL}`) for a project, refer to <>. +To get API keys for the {es} endpoint (`${ES_URL}`) for a project, refer to <>. [discrete] [[elasticsearch-api-conventions-content-type]] @@ -79,7 +79,7 @@ Because it's used only for traces, you can safely generate a unique `traceparent {es} APIs surface the header's `trace-id` value as `trace.id` in the: -* JSON {{es}} server logs +* JSON {es} server logs * Slow logs * Deprecation logs @@ -92,7 +92,7 @@ For example, a `traceparent` value of `00-0af7651916cd43dd8448eb211c80319c-b7ad6 [[elasticsearch-api-conventions-x-opaque-id]] === X-Opaque-Id -You can pass an `X-Opaque-Id` HTTP header to track the origin of a request in {{es}} logs and tasks. +You can pass an `X-Opaque-Id` HTTP header to track the origin of a request in {es} logs and tasks. For example: [source,bash] @@ -126,7 +126,7 @@ curl -X GET "${ES_URL}/_search?pretty" \ */ //// -For the deprecation logs, {{es}} also uses the `X-Opaque-Id` value to throttle and deduplicate deprecation warnings. +For the deprecation logs, {es} also uses the `X-Opaque-Id` value to throttle and deduplicate deprecation warnings. //// /* MISSING LINKS @@ -137,16 +137,16 @@ See \<\<_deprecation_logs_throttling>>. The `X-Opaque-Id` header accepts any arbitrary value. However, it is recommended that you limit these values to a finite set, such as an ID per client. Don't generate a unique `X-Opaque-Id` header for every request. -Too many unique `X-Opaque-Id` values can prevent {{es}} from deduplicating warnings in the deprecation logs. +Too many unique `X-Opaque-Id` values can prevent {es} from deduplicating warnings in the deprecation logs. [discrete] [[elasticsearch-api-conventions-request-bodies]] == Request bodies -A number of {{es}} APIs with GET operations--most notably the search API--support a request body. +A number of {es} APIs with GET operations--most notably the search API--support a request body. While the GET operation makes sense in the context of retrieving information, GET requests with a body are not supported by all HTTP libraries. -All {{es}} APIs with GET operations that require a body can also be submitted as POST requests. +All {es} APIs with GET operations that require a body can also be submitted as POST requests. Alternatively, you can pass the request body as the `source` query string parameter when using GET. When you use this method, the `source_content_type` parameter should also be passed with a media type value that indicates the format of the source, such as `application/json`. 
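For example, passing the request body through the `source` query string parameter of a plain GET might look like this sketch, using the Python `requests` library with the same kind of `ES_URL`/`API_KEY` placeholders as above and a hypothetical index:

[source,python]
----
import json

import requests

ES_URL = "https://my-project.es.example.com"  # placeholder endpoint
API_KEY = "..."                               # placeholder API key

query = {"query": {"match_all": {}}}

# The body travels in the `source` parameter, with `source_content_type`
# declaring its media type.
resp = requests.get(
    f"{ES_URL}/my-index/_search",  # hypothetical index
    params={
        "source": json.dumps(query),
        "source_content_type": "application/json",
    },
    headers={"Authorization": f"ApiKey {API_KEY}"},
)
print(resp.json()["hits"]["total"]["value"])
----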
@@ -157,7 +157,7 @@ Error: no handler found for uri [.../_search?pretty=true] and method [GET]" ## Date math -Most {{es}} APIs that accept an index or index alias argument support date math. +Most {es} APIs that accept an index or index alias argument support date math. Date math name resolution enables you to search a range of time series indices or index aliases rather than searching all of your indices and filtering the results. Limiting the number of searched indices reduces cluster load and improves search performance. For example, if you are searching for errors in your daily logs, you can use a date math name template to restrict the search to the past two days. diff --git a/serverless/pages/apis-http-apis.asciidoc b/serverless/pages/apis-http-apis.asciidoc index 37085560e4..8de2a62aed 100644 --- a/serverless/pages/apis-http-apis.asciidoc +++ b/serverless/pages/apis-http-apis.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-http-apis]] = REST APIs -// :description: {{es}} and {kib} expose REST APIs that can be called directly to configure and access {stack} features. +// :description: {es} and {kib} expose REST APIs that can be called directly to configure and access {stack} features. // :keywords: serverless, elasticsearch, http, rest, overview [discrete] diff --git a/serverless/pages/clients-dot-net-getting-started.asciidoc b/serverless/pages/clients-dot-net-getting-started.asciidoc index 66ecf48c27..1f9ab78819 100644 --- a/serverless/pages/clients-dot-net-getting-started.asciidoc +++ b/serverless/pages/clients-dot-net-getting-started.asciidoc @@ -34,14 +34,14 @@ dotnet add package Elastic.Clients.Elasticsearch.Serverless [[elasticsearch-dot-net-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,net] ---- var client = new ElasticsearchClient("", new ApiKey("")); ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. [discrete] [[elasticsearch-dot-net-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-go-getting-started.asciidoc b/serverless/pages/clients-go-getting-started.asciidoc index ae0e0c0574..f3fd0c8f39 100644 --- a/serverless/pages/clients-go-getting-started.asciidoc +++ b/serverless/pages/clients-go-getting-started.asciidoc @@ -4,7 +4,7 @@ // :description: Set up and use the Go client. // :keywords: serverless, elasticsearch, go, how to -This page guides you through the installation process of the {{es}} Go +This page guides you through the installation process of the {es} Go client, shows you how to initialize the client, and how to perform basic {es} operations with it. @@ -55,7 +55,7 @@ import ( [[elasticsearch-go-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,go] ---- @@ -68,7 +68,7 @@ if err != nil { } ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. 
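One practical note on the date math described earlier: date math index names contain characters that are not URL-safe (`<`, `>`, `{`, `}`, `/`), so they must be percent-encoded when used in a request path. A sketch of searching the last two days of dated indices, with a hypothetical naming scheme:

[source,python]
----
import urllib.parse

import requests

ES_URL = "https://my-project.es.example.com"  # placeholder endpoint
API_KEY = "..."                               # placeholder API key

# <my-logs-{now/d-2d}> resolves server-side to a concrete dated index name;
# the whole expression must be percent-encoded in the URL path.
index = urllib.parse.quote("<my-logs-{now/d-2d}>", safe="")

resp = requests.get(
    f"{ES_URL}/{index}/_search",  # hypothetical dated indices
    headers={"Authorization": f"ApiKey {API_KEY}"},
)
print(resp.status_code)
----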
[discrete] [[elasticsearch-go-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-java-getting-started.asciidoc b/serverless/pages/clients-java-getting-started.asciidoc index 9768c9ea50..b3e8dcdf65 100644 --- a/serverless/pages/clients-java-getting-started.asciidoc +++ b/serverless/pages/clients-java-getting-started.asciidoc @@ -21,7 +21,7 @@ The same client is used for {es3}, on-premise and managed Elasticsearch. Some AP * Java 8 or later. * A JSON object mapping library to allow seamless integration of -your application classes with the {{es}} API. The examples below +your application classes with the {es} API. The examples below show usage with Jackson. [discrete] @@ -79,7 +79,7 @@ the following to the `pom.xml` of your project: [[elasticsearch-java-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,java] ---- @@ -103,7 +103,7 @@ ElasticsearchTransport transport = new RestClientTransport( ElasticsearchClient esClient = new ElasticsearchClient(transport); ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. [discrete] [[elasticsearch-java-client-getting-started-using-the-api]] @@ -157,7 +157,7 @@ SearchResponse response = esClient.search(s -> s A few things to note in the above example: * The search query is built using a hierarchy of lambda expressions that closely -follows the {{es}} HTTP API. Lambda expressions allows you to be guided +follows the {es} HTTP API. Lambda expressions allows you to be guided by your IDE's autocompletion, without having to import (or even know!) the actual classes representing a query. * The last parameter `Product.class` instructs the client to return results as diff --git a/serverless/pages/clients-nodejs-getting-started.asciidoc b/serverless/pages/clients-nodejs-getting-started.asciidoc index 7a64ab1c64..d4c57d03c4 100644 --- a/serverless/pages/clients-nodejs-getting-started.asciidoc +++ b/serverless/pages/clients-nodejs-getting-started.asciidoc @@ -39,7 +39,7 @@ npm install @elastic/elasticsearch-serverless [[elasticsearch-nodejs-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,js] ---- diff --git a/serverless/pages/clients-php-getting-started.asciidoc b/serverless/pages/clients-php-getting-started.asciidoc index 78b36b150b..87522d283f 100644 --- a/serverless/pages/clients-php-getting-started.asciidoc +++ b/serverless/pages/clients-php-getting-started.asciidoc @@ -39,7 +39,7 @@ composer require elastic/elasticsearch-serverless [[elasticsearch-php-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,php] ---- @@ -53,7 +53,7 @@ $client = ClientBuilder::create() ->build(); ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. 
[discrete] [[elasticsearch-php-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-python-getting-started.asciidoc b/serverless/pages/clients-python-getting-started.asciidoc index 15c4258193..c00fdce2aa 100644 --- a/serverless/pages/clients-python-getting-started.asciidoc +++ b/serverless/pages/clients-python-getting-started.asciidoc @@ -50,7 +50,7 @@ python -m pip install elasticsearch [[elasticsearch-python-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,python] ---- @@ -62,7 +62,7 @@ client = Elasticsearch( ) ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. [discrete] [[elasticsearch-python-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients-ruby-getting-started.asciidoc b/serverless/pages/clients-ruby-getting-started.asciidoc index 0ef6da9eb1..f71553923c 100644 --- a/serverless/pages/clients-ruby-getting-started.asciidoc +++ b/serverless/pages/clients-ruby-getting-started.asciidoc @@ -18,7 +18,7 @@ client for {es3}, shows you how to initialize the client, and how to perform bas == Requirements * Ruby 3.0 or higher installed on your system. -* To use the `elasticsearch-serverless` gem, you must have an API key and {{es}} endpoint for an {es3} project. +* To use the `elasticsearch-serverless` gem, you must have an API key and {es} endpoint for an {es3} project. [discrete] [[elasticsearch-ruby-client-getting-started-installation]] @@ -90,7 +90,7 @@ bundle exec rake console [[elasticsearch-ruby-client-getting-started-initialize-the-client]] == Initialize the client -Initialize the client using your API key and {{es}} endpoint: +Initialize the client using your API key and {es} endpoint: [source,ruby] ---- @@ -100,7 +100,7 @@ client = ElasticsearchServerless::Client.new( ) ---- -To get API keys for the {{es}} endpoint for a project, see <>. +To get API keys for the {es} endpoint for a project, see <>. [discrete] [[elasticsearch-ruby-client-getting-started-using-the-api]] diff --git a/serverless/pages/clients.asciidoc b/serverless/pages/clients.asciidoc index 55fecdfc5f..eb747d853a 100644 --- a/serverless/pages/clients.asciidoc +++ b/serverless/pages/clients.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-clients]] = Client libraries -// :description: Index, search, and manage {{es}} data in your preferred language. +// :description: Index, search, and manage {es} data in your preferred language. // :keywords: serverless, elasticsearch, clients, overview You can use the following language clients with {es-serverless}: diff --git a/serverless/pages/cloud-regions.asciidoc b/serverless/pages/cloud-regions.asciidoc index d2bec3a22a..ec64f7dfa8 100644 --- a/serverless/pages/cloud-regions.asciidoc +++ b/serverless/pages/cloud-regions.asciidoc @@ -1,7 +1,7 @@ [[regions]] = Serverless regions -// :description: Index, search, and manage {{es}} data in your preferred language. +// :description: Index, search, and manage {es} data in your preferred language. // :keywords: serverless, regions, aws, azure, cloud A region is the geographic area where the data center of the cloud provider that hosts your project is located. Review the available Elastic Cloud Serverless regions to decide which region to use. If you aren't sure which region to pick, choose one that is geographically close to you to reduce latency. 
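Tying the Python client snippet above together, here is a minimal end-to-end sketch: initialize the client, index one document, and search for it. The index name and document are hypothetical:

[source,python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://my-project.es.example.com",  # placeholder endpoint
    api_key="...",                        # placeholder API key
)

# Index a single document; refresh so it is immediately searchable.
client.index(
    index="books",  # hypothetical index
    document={"title": "Snow Crash", "author": "Neal Stephenson"},
    refresh=True,
)

resp = client.search(index="books", query={"match": {"title": "snow"}})
for hit in resp["hits"]["hits"]:
    print(hit["_source"]["title"])
----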
diff --git a/serverless/pages/connecting-to-es-endpoint.asciidoc b/serverless/pages/connecting-to-es-endpoint.asciidoc index 20faa531ae..3474623637 100644 --- a/serverless/pages/connecting-to-es-endpoint.asciidoc +++ b/serverless/pages/connecting-to-es-endpoint.asciidoc @@ -17,7 +17,7 @@ To connect to your Elasticsearch instance from your applications, client librari [[elasticsearch-get-started-create-api-key]] == Create a new API key -Create an API key to authenticate your requests to the {{es}} APIs. You'll need an API key for all API requests and client connections. +Create an API key to authenticate your requests to the {es} APIs. You'll need an API key for all API requests and client connections. To create a new API key: @@ -40,10 +40,10 @@ You can't recover or retrieve a lost API key. Instead, you must delete the key a [discrete] [[elasticsearch-get-started-endpoint]] -== Get your {{es}} endpoint URL +== Get your {es} endpoint URL -The endpoint URL is the address for your {{es}} instance. -You'll use this URL together with your API key to make requests to the {{es}} APIs. +The endpoint URL is the address for your {es} instance. +You'll use this URL together with your API key to make requests to the {es} APIs. To find the endpoint URL: diff --git a/serverless/pages/custom-roles.asciidoc b/serverless/pages/custom-roles.asciidoc index 2301903456..4bd795f0ac 100644 --- a/serverless/pages/custom-roles.asciidoc +++ b/serverless/pages/custom-roles.asciidoc @@ -34,7 +34,7 @@ image::images/custom-roles-ui.png[Custom Roles app] // TO-DO: This screenshot needs to be refreshed and automated. Roles are a collection of privileges that enable users to access project features and data. -For example, when you create a custom role, you can assign {{es}} cluster and index privileges and {kib} privileges. +For example, when you create a custom role, you can assign {es} cluster and index privileges and {kib} privileges. [NOTE] ==== @@ -43,13 +43,13 @@ You cannot assign {ref}/security-privileges.html#_run_as_privilege[run as privil [discrete] [[custom-roles-es-cluster-privileges]] -== {{es}} cluster privileges +== {es} cluster privileges Cluster privileges grant access to monitoring and management features in {es}. They also enable some {stack-manage-app} capabilities in your project. [role="screenshot"] -image::images/custom-roles-cluster-privileges.png[Create a custom role and define {{es}} cluster privileges] +image::images/custom-roles-cluster-privileges.png[Create a custom role and define {es} cluster privileges] // TO-DO: This screenshot needs to be refreshed and automated. @@ -57,22 +57,22 @@ Refer to {ref}/security-privileges.html#privileges-list-cluster[cluster privileg [discrete] [[custom-roles-es-index-privileges]] -== {{es}} index privileges +== {es} index privileges Each role can grant access to multiple data indices, and each index can have a different set of privileges. Typically, you will grant the `read` and `view_index_metadata` privileges to each index that you expect your users to work with. For example, grant access to indices that match an `acme-marketing-*` pattern: [role="screenshot"] -image::images/custom-roles-index-privileges.png[Create a custom role and define {{es}} index privileges] +image::images/custom-roles-index-privileges.png[Create a custom role and define {es} index privileges] // TO-DO: This screenshot needs to be refreshed and automated. Refer to {ref}/security-privileges.html#privileges-list-indices[index privileges] for a complete description of available options. 
Document-level and field-level security affords you even more granularity when it comes to granting access to your data. -With document-level security (DLS), you can write an {{es}} query to describe which documents this role grants access to. -With field-level security (FLS), you can instruct {{es}} to grant or deny access to specific fields within each document. +With document-level security (DLS), you can write an {es} query to describe which documents this role grants access to. +With field-level security (FLS), you can instruct {es} to grant or deny access to specific fields within each document. // Derived from https://www.elastic.co/guide/en/kibana/current/kibana-role-management.html#adding_cluster_privileges diff --git a/serverless/pages/data-views.asciidoc b/serverless/pages/data-views.asciidoc index 3098b124aa..3a0d613946 100644 --- a/serverless/pages/data-views.asciidoc +++ b/serverless/pages/data-views.asciidoc @@ -1,13 +1,13 @@ [[data-views]] = {data-sources-cap} -// :description: Elastic requires a {{data-source}} to access the {{es}} data that you want to explore. +// :description: Elastic requires a {data-source} to access the {es} data that you want to explore. // :keywords: serverless, Elasticsearch, Observability, Security This content applies to: {es-badge} {obs-badge} {sec-badge} -A {{data-source}} can point to one or more indices, {ref}/data-streams.html[data streams], or {ref}/alias.html[index aliases]. -For example, a {{data-source}} can point to your log data from yesterday or all indices that contain your data. +A {data-source} can point to one or more indices, {ref}/data-streams.html[data streams], or {ref}/alias.html[index aliases]. +For example, a {data-source} can point to your log data from yesterday or all indices that contain your data. //// /* @@ -17,7 +17,7 @@ For example, a {{data-source}} can point to your log data from yesterday or all * Access to **Data Views** requires the {kib} privilege `Data View Management`. -* To create a {{data-source}}, you must have the {es} privilege +* To create a {data-source}, you must have the {es} privilege `view_index_metadata`. * If a read-only indicator appears, you have insufficient privileges @@ -33,15 +33,15 @@ For example, a {{data-source}} can point to your log data from yesterday or all After you've loaded your data, follow these steps to create a {data-source}: -// +// . Go to **{project-settings} → {manage-app} → {data-views-app}**. Alternatively, go to **Discover** and open the data view menu. + [role="screenshot"] -image:images/discover-find-data-view.png[How to set the {{data-source}} in Discover] +image:images/discover-find-data-view.png[How to set the {data-source} in Discover] + . Click **Create a {data-source}**. -. Give your {{data-source}} a name. +. Give your {data-source} a name. . Start typing in the **Index pattern** field, and Elastic looks for the names of indices, data streams, and aliases that match your input. You can view all available sources or only the sources that the data view targets. @@ -65,8 +65,8 @@ based on different timestamps. . Click **Show advanced settings** to: + ** Display hidden and system indices. -** Specify your own {{data-source}} name. For example, enter your {{es}} index alias name. -. Click **Save {{data-source}} to Elastic**. +** Specify your own {data-source} name. For example, enter your {es} index alias name. +. Click **Save {data-source} to Elastic**. You can manage your data views in **{project-settings} → {manage-app} → {data-views-app}**. 
@@ -76,10 +76,10 @@ You can manage your data views in **{project-settings} → {manage-app} → {dat Want to explore your data or create a visualization without saving it as a data view? Select **Use without saving** in the **Create {data-source}** form in **Discover**. -With a temporary {{data-source}}, you can add fields and create an {{es}} query alert, just like you would a regular {{data-source}}. +With a temporary {data-source}, you can add fields and create an {es} query alert, just like you would a regular {data-source}. Your work won't be visible to others in your space. -A temporary {{data-source}} remains in your space until you change apps, or until you save it. +A temporary {data-source} remains in your space until you change apps, or until you save it. // ![how to create an ad-hoc data view](https://images.contentstack.io/v3/assets/bltefdd0b53724fa2ce/blte3a4f3994c44c0cc/637eb0c95834861044c21a25/ad-hoc-data-view.gif) @@ -94,7 +94,7 @@ A temporary {{data-source}} remains in your space until you change apps, or unti ### Use {data-sources} with rolled up data -A {{data-source}} can match one rollup index. For a combination rollup +A {data-source} can match one rollup index. For a combination rollup {data-source} with both raw and rolled up data, use the standard notation: ```ts @@ -108,8 +108,8 @@ For an example, refer to : Debug your searches using various {{es}} APIs. +// - : Debug your searches using various {es} APIs. diff --git a/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc b/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc index 35a816b786..6f24c54372 100644 --- a/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-deploy-model.asciidoc @@ -27,7 +27,7 @@ allocation. Since eland uses APIs to deploy the models, you cannot see the models in {kib} until the saved objects are synchronized. You can follow the prompts in {kib}, wait for automatic synchronization, or use the -{kibana-ref}/machine-learning-api-sync.html[sync {{ml}} saved objects API]. +{kibana-ref}/machine-learning-api-sync.html[sync {ml} saved objects API]. ==== When you deploy the model, its allocations are distributed across available {ml} @@ -39,8 +39,8 @@ Throughput can be scaled by adding more allocations to the deployment; it increases the number of {infer} requests that can be performed in parallel. All allocations assigned to a node share the same copy of the model in memory. The model is loaded into memory in a native process that encapsulates `libtorch`, -which is the underlying {{ml}} library of PyTorch. The number of allocations -setting affects the amount of model allocations across all the {{ml}} nodes. Model +which is the underlying {ml} library of PyTorch. The number of allocations +setting affects the amount of model allocations across all the {ml} nodes. Model allocations are distributed in such a way that the total number of used threads does not exceed the allocated processors of a node. diff --git a/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc b/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc index 8e25a652c9..32a026f542 100644 --- a/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-elser.asciidoc @@ -32,7 +32,7 @@ for semantic search or the trial period activated. 
== Benchmarks

The following sections provide information about how ELSER performs on different -hardwares and compares the model performance to {{es}} BM25 and other strong +hardware and compare the model performance to {es} BM25 and other strong baselines such as Splade or OpenAI. [discrete] @@ -84,7 +84,7 @@ Discounted Cumulative Gain (NDCG) which can handle multiple relevant documents and fine-grained document ratings. The metric is applied to a fixed-sized list of retrieved documents which, in this case, is the top 10 documents (NDCG@10). -The table below shows the performance of ELSER compared to {{es}} BM25 with an +The table below shows the performance of ELSER compared to {es} BM25 with an English analyzer broken down by the 12 data sets used for the evaluation. ELSER has 10 wins, 1 draw, 1 loss and an average improvement in NDCG@10 of 17%. diff --git a/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc b/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc index dcffca6cf8..2fc9bba428 100644 --- a/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-import-model.asciidoc @@ -19,7 +19,7 @@ Trained models must be in a TorchScript representation for use with {stack-ml-features}. ==== -https://github.com/elastic/eland[Eland] is an {{es}} Python client that +https://github.com/elastic/eland[Eland] is an {es} Python client that provides a simple script to perform the conversion of Hugging Face transformer models to their TorchScript representations, the chunking process, and upload to {es}; it is therefore the recommended import method. You can either install @@ -40,7 +40,7 @@ python -m pip install 'eland[pytorch]' + // NOTCONSOLE . Run the `eland_import_hub_model` script to download the model from Hugging -Face, convert it to TorchScript format, and upload to the {{es}} cluster. +Face, convert it to TorchScript format, and upload to the {es} cluster. For example: + // NOTCONSOLE @@ -100,7 +100,7 @@ docker run -it --rm elastic/eland \ --start ---- -Replace the `$ELASTICSEARCH_URL` with the URL for your {{es}} cluster. Refer to +Replace the `$ELASTICSEARCH_URL` with the URL for your {es} cluster. Refer to https://www.elastic.co/docs/current/serverless/elasticsearch/explore-your-data-ml-nlp/deploy-trained-models/import-model[Authentication methods] to learn more. diff --git a/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc b/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc index e0f1e73287..5c38de51ab 100644 --- a/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-model-ref.asciidoc @@ -71,13 +71,13 @@ for calculating the similarity between the embeddings they produce. Examples of typical scoring functions are: `cosine`, `dot product` and `euclidean distance` (also known as `l2_norm`). -The embeddings produced by these models should be indexed in {{es}} using the +The embeddings produced by these models should be indexed in {es} using the {ref}/dense-vector.html[dense vector field type] with an appropriate {ref}/dense-vector.html#dense-vector-params[similarity function] chosen for the model. -To find similar embeddings in {{es}} use the efficient +To find similar embeddings in {es}, use the efficient {ref}/knn-search.html#approximate-knn[Approximate k-nearest neighbor (kNN)] search API with a text embedding as the query vector.
Approximate kNN search uses the similarity function defined in the dense vector field mapping is used @@ -144,7 +144,7 @@ Using `DPREncoderWrapper`: == Expected model output Models used for each NLP task type must output tensors of a specific format to -be used in the {{es}} NLP pipelines. +be used in the {es} NLP pipelines. Here are the expected outputs for each task type. diff --git a/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc b/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc index c6c0bf64be..03cd5ce39a 100644 --- a/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-ner-example.asciidoc @@ -69,7 +69,7 @@ message is displayed at the top of the page that says _"ML job and trained model synchronization required"_. Follow the link to _"Synchronize your jobs and trained models."_ Then click **Synchronize**. You can also wait for the automatic synchronization that occurs in every hour, or -use the {kibana-ref}/ml-sync.html[sync {{ml}} objects API]. +use the {kibana-ref}/ml-sync.html[sync {ml} objects API]. [discrete] [[test-the-ner-model]] diff --git a/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc b/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc index 3c3fe71874..97730070a7 100644 --- a/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-search-compare.asciidoc @@ -14,7 +14,7 @@ unstructured text or compare different pieces of text. == Text embedding Text embedding is a task which produces a mathematical representation of text -called an embedding. The {{ml}} model turns the text into an array of numerical +called an embedding. The {ml} model turns the text into an array of numerical values (also known as a _vector_). Pieces of content with similar meaning have similar representations. This means it is possible to determine whether different pieces of text are either semantically similar, different, or even diff --git a/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc b/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc index 2bc29bf07d..4cd1b725bd 100644 --- a/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-test-inference.asciidoc @@ -4,7 +4,7 @@ // :keywords: serverless, elasticsearch, tbd When the model is deployed on at least one node in the cluster, you can begin to -perform inference. _{infer-cap}_ is a {{ml}} feature that enables you to use +perform inference. _{infer-cap}_ is a {ml} feature that enables you to use your trained models to perform NLP tasks (such as text extraction, classification, or embeddings) on incoming data. @@ -13,7 +13,7 @@ The simplest method to test your model against new data is to use the field of an existing index in your cluster to test the model: [role="screenshot"] -image::images/ml-nlp-test-ner.png[Testing a sentence with two named entities against a NER trained model in the {{ml}} app] +image::images/ml-nlp-test-ner.png[Testing a sentence with two named entities against a NER trained model in the {ml} app] Alternatively, you can use the {ref}/infer-trained-model.html[infer trained model API]. 
diff --git a/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc b/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc index fe4e8052c7..a581f8df33 100644 --- a/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp-text-embedding-example.asciidoc @@ -76,7 +76,7 @@ message is displayed at the top of the page that says _"ML job and trained model synchronization required"_. Follow the link to _"Synchronize your jobs and trained models."_ Then click **Synchronize**. You can also wait for the automatic synchronization that occurs in every hour, or -use the {kibana-ref}/ml-sync.html[sync {{ml}} objects API]. +use the {kibana-ref}/ml-sync.html[sync {ml} objects API]. [discrete] [[test-the-text-embedding-model]] diff --git a/serverless/pages/explore-your-data-ml-nlp.asciidoc b/serverless/pages/explore-your-data-ml-nlp.asciidoc index 51550c9a5d..8fe9e9476d 100644 --- a/serverless/pages/explore-your-data-ml-nlp.asciidoc +++ b/serverless/pages/explore-your-data-ml-nlp.asciidoc @@ -6,7 +6,7 @@ natural language in spoken word or written text. Classically, NLP was performed using linguistic rules, dictionaries, regular -expressions, and {{ml}} for specific tasks such as automatic categorization or +expressions, and {ml} for specific tasks such as automatic categorization or summarization of text. In recent years, however, deep learning techniques have taken over much of the NLP landscape. Deep learning capitalizes on the availability of large scale data sets, cheap computation, and techniques for @@ -14,7 +14,7 @@ learning at scale with less human involvement. Pre-trained language models that use a transformer architecture have been particularly successful. For example, BERT is a pre-trained language model that was released by Google in 2018. Since that time, it has become the inspiration for most of today’s modern NLP -techniques. The {stack} {{ml}} features are structured around BERT and +techniques. The {stack} {ml} features are structured around BERT and transformer models. These features support BERT’s tokenization scheme (called WordPiece) and transformer models that conform to the standard BERT model interface. For the current list of supported architectures, refer to @@ -22,7 +22,7 @@ interface. For the current list of supported architectures, refer to To incorporate transformer models and make predictions, {es-serverless} uses libtorch, which is an underlying native library for PyTorch. Trained models must be in a -TorchScript representation for use with {stack} {{ml}} features. +TorchScript representation for use with {stack} {ml} features. You can perform the following NLP operations: diff --git a/serverless/pages/explore-your-data.asciidoc b/serverless/pages/explore-your-data.asciidoc index f1ea853979..ea4edc9aa5 100644 --- a/serverless/pages/explore-your-data.asciidoc +++ b/serverless/pages/explore-your-data.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-explore-your-data]] = Explore your data -// :description: Turn {{es}} data into actionable insights with aggregations, visualizations, and alerts +// :description: Turn {es} data into actionable insights with aggregations, visualizations, and alerts // :keywords: serverless, elasticsearch, explore, overview In addition to search, {es3} offers several options for analyzing and visualizing your data. 
@@ -9,7 +9,7 @@ In addition to search, {es3} offers several options for analyzing and visualizin [NOTE] ==== These features are available on all Elastic deployment types: self-managed clusters, Elastic Cloud Hosted deployments, and {es-serverless} projects. -They are documented in the {{es}} and {kib} core documentation. +They are documented in the {es} and {kib} core documentation. ==== [discrete] diff --git a/serverless/pages/get-started.asciidoc b/serverless/pages/get-started.asciidoc index 1a0be83ffa..dab32e2df0 100644 --- a/serverless/pages/get-started.asciidoc +++ b/serverless/pages/get-started.asciidoc @@ -22,11 +22,11 @@ On this page, you will learn how to: [[elasticsearch-get-started-create-project]] == Create an {es-serverless} project -Use your {ecloud} account to create a fully-managed {{es}} project: +Use your {ecloud} account to create a fully-managed {es} project: . Navigate to {ess-console}[cloud.elastic.co] and create a new account or log in to your existing account. . Within **Serverless Projects**, choose **Create project**. -. Choose the {{es}} project type. +. Choose the {es} project type. . Select a **configuration** for your project, based on your use case. + ** **General purpose**: For general search use cases across various data types. @@ -98,5 +98,5 @@ If you're already familiar with Elasticsearch, you can jump right into setting u [[elasticsearch-next-steps]] == Next steps -* Once you've added data to your {es-serverless} project, you can use {kibana-ref}/playground.html[Playground] to test and tweak {{es}} queries and chat with your data, using GenAI. -* You can also try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {{es}} documentation. \ No newline at end of file +* Once you've added data to your {es-serverless} project, you can use {kibana-ref}/playground.html[Playground] to test and tweak {es} queries and chat with your data, using GenAI. +* You can also try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {es} documentation. \ No newline at end of file diff --git a/serverless/pages/index-management.asciidoc b/serverless/pages/index-management.asciidoc index 1660199b60..99a3fe00fd 100644 --- a/serverless/pages/index-management.asciidoc +++ b/serverless/pages/index-management.asciidoc @@ -30,7 +30,7 @@ TBD: Are these RBAC requirements valid for serverless? ## Required permissions -If you use {{es}} {{security-features}}, the following security privileges are required: +If you use {es} {security-features}, the following security privileges are required: * The `monitor` cluster privilege to access Elastic's **{index-manage-app}** features. * The `view_index_metadata` and `manage` index privileges to view a data stream diff --git a/serverless/pages/ingest-pipelines.asciidoc b/serverless/pages/ingest-pipelines.asciidoc index c5490862be..da743d499d 100644 --- a/serverless/pages/ingest-pipelines.asciidoc +++ b/serverless/pages/ingest-pipelines.asciidoc @@ -11,7 +11,7 @@ For example, you can use pipelines to remove fields, extract values from text, a A pipeline consists of a series of configurable tasks called processors. Each processor runs sequentially, making specific changes to incoming documents. -After the processors have run, {{es}} adds the transformed documents to your data stream or index. +After the processors have run, {es} adds the transformed documents to your data stream or index. //// /* @@ -19,7 +19,7 @@ TBD: Do these requirements apply in serverless? 
## Prerequisites - Nodes with the ingest node role handle pipeline processing. To use ingest pipelines, your cluster must have at least one node with the ingest role. For heavy ingest loads, we recommend creating dedicated ingest nodes. -- If the {{es}} security features are enabled, you must have the manage_pipeline cluster privilege to manage ingest pipelines. To use Kibana’s Ingest Pipelines feature, you also need the cluster:monitor/nodes/info cluster privileges. +- If the {es} security features are enabled, you must have the manage_pipeline cluster privilege to manage ingest pipelines. To use Kibana’s Ingest Pipelines feature, you also need the cluster:monitor/nodes/info cluster privileges. - Pipelines including the enrich processor require additional setup. See Enrich your data. */ //// diff --git a/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc index febb39a365..78a14b9000 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-api.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-ingest-data-through-api]] = Ingest data through API -// :description: Add data to {{es}} using HTTP APIs or a language client. +// :description: Add data to {es} using HTTP APIs or a language client. // :keywords: serverless, elasticsearch, ingest, api, how to -The {{es}} APIs enable you to ingest data through code. +The {es} APIs enable you to ingest data through code. You can use the APIs of one of the <> or the {es} HTTP APIs. The examples diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc index 76d04a77b4..15d2b8c9f9 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-beats.asciidoc @@ -30,7 +30,7 @@ Depending on what data you want to collect, you may need to install multiple shi | https://www.elastic.co/products/beats/winlogbeat[Winlogbeat] |=== -{beats} can send data to {{es}} directly or through {ls}, where you +{beats} can send data to {es} directly or through {ls}, where you can further process and enhance the data before visualizing it in {kib}. .Authenticating with {es} diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc index 01740eb81f..78b6997c05 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-connector-client.asciidoc @@ -113,7 +113,7 @@ You'll need to update these values in your https://github.com/elastic/connectors [[elasticsearch-ingest-data-through-integrations-connector-client-step-2-deploy-your-self-managed-connector]] == Step 2: Deploy your self-managed connector -To use connector clients, you must deploy the connector service so your connector can talk to your {{es}} instance. +To use connector clients, you must deploy the connector service so your connector can talk to your {es} instance. The source code is hosted in the `elastic/connectors` repository. 
You have two deployment options: @@ -125,8 +125,8 @@ You have two deployment options: ==== You'll need the following values handy to update your `config.yml` file: -* `elasticsearch.host`: Your {{es}} endpoint. Printed to the screen when you create a new connector. -* `elasticsearch.api_key`: Your {{es}} API key. You can create API keys by navigating to **Home**, and clicking **New** in the **API key** section. Once your connector is running, you'll be able to create a new API key that is limited to only access the connector's index. +* `elasticsearch.host`: Your {es} endpoint. Printed to the screen when you create a new connector. +* `elasticsearch.api_key`: Your {es} API key. You can create API keys by navigating to **Home**, and clicking **New** in the **API key** section. Once your connector is running, you'll be able to create a new API key that is limited to only access the connector's index. * `connector_id`: Unique id for your connector. Printed to the screen when you create a new connector. * `service_type`: Original data source type. Printed to the screen when you create a new connector. ==== @@ -251,7 +251,7 @@ make install make run ---- -The connector service should now be running in your terminal. If the connection to your {{es}} instance was successful, the **Configure your connector** step will be activated in the project's UI. +The connector service should now be running in your terminal. If the connection to your {es} instance was successful, the **Configure your connector** step will be activated in the project's UI. Here we're working locally. In a production setup, you'll deploy the connector service to your own infrastructure. @@ -280,7 +280,7 @@ For example, the Sharepoint Online connector requires the following details abou Once you've entered the data source details, you need to connect to an index. This is the final step in your project's UI, before you can run a sync. -You can choose to sync to an existing {{es}} index, or create a new index for your connector. +You can choose to sync to an existing {es} index, or create a new index for your connector. You can also create an API key that is limited to only access your selected index. .Index name prefix @@ -296,7 +296,7 @@ When choosing an existing index for the connector to sync to, please ensure mapp ==== Once this step is completed, you're ready to run a sync. -When a sync is launched you'll start to see documents being added to your {{es}} index. +When a sync is launched you'll start to see documents being added to your {es} index. Learn https://github.com/elastic/connectors/blob/main/docs/DEVELOPING.md#syncing[how syncing works] in the `elastic/connectors` repo docs. diff --git a/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc b/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc index 97c1564135..63640a7679 100644 --- a/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc +++ b/serverless/pages/ingest-your-data-ingest-data-through-integrations-logstash.asciidoc @@ -8,7 +8,7 @@ It supports a wide variety of data sources, and can dynamically unify data from disparate sources and normalize the data into destinations of your choice. 
{ls} can collect data using a variety of {ls} {logstash-ref}/input-plugins.html[input plugins], enrich and transform the data with {ls} {logstash-ref}/filter-plugins.html[filter plugins], -and output the data to {{es}} using the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{es} output plugin]. +and output the data to {es} using the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{es} output plugin]. You can use {ls} to extend <> for advanced use cases, such as data routed to multiple destinations or when you need to make your data persistent. @@ -22,10 +22,10 @@ Some capabilities and features for large, self-managed users aren't appropriate You'll use the {ls} {logstash-ref}/plugins-outputs-elasticsearch.html[{es} output plugin] to send data to {es3}. Some differences to note between {es3} and self-managed {es}: -* Your logstash-output-elasticsearch configuration uses **API keys** to access {{es}} from {ls}. +* Your logstash-output-elasticsearch configuration uses **API keys** to access {es} from {ls}. User-based security settings are ignored and may cause errors. * {es3} uses **{dlm} ({dlm-init})** instead of {ilm} ({ilm-init}). -If you add {ilm-init} settings to your {{es}} output configuration, they are ignored and may cause errors. +If you add {ilm-init} settings to your {es} output configuration, they are ignored and may cause errors. * **{ls} monitoring** for {serverless-short} is available through the https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md[{ls} Integration] in <>. **Known issue** @@ -60,7 +60,7 @@ No additional SSL configuration steps are needed. == API keys for connecting {ls} to {es3} Use the **Security: API key** section in the UI to <> -for securely connecting the {ls} {{es}} output to {es3}. +for securely connecting the {ls} {es} output to {es3}. We recommend creating a unique API key per {ls} instance. You can create as many API keys as necessary. @@ -81,13 +81,13 @@ output { [discrete] [[elasticsearch-ingest-data-through-logstash-migrating-elasticsearch-data-using-ls]] -== Migrating {{es}} data using {ls} +== Migrating {es} data using {ls} -You can use {ls} to migrate data from self-managed {{es}} or {ess} to {es3}, or to migrate data from one {es3} deployment to another. +You can use {ls} to migrate data from self-managed {es} or {ess} to {es3}, or to migrate data from one {es3} deployment to another. -Create a {logstash-ref}/configuration.html[{ls} pipeline] that includes the {{es}} {logstash-ref}/plugins-inputs-elasticsearch.html[input plugin] and {logstash-ref}/plugins-outputs-elasticsearch.html[output plugin]. +Create a {logstash-ref}/configuration.html[{ls} pipeline] that includes the {es} {logstash-ref}/plugins-inputs-elasticsearch.html[input plugin] and {logstash-ref}/plugins-outputs-elasticsearch.html[output plugin]. -Configure the {{es}} input to point to your source deployment or instance, and configure the {{es}} output with the `cloud_id` and `api_key` settings for your target {es3} instance. +Configure the {es} input to point to your source deployment or instance, and configure the {es} output with the `cloud_id` and `api_key` settings for your target {es3} instance. If your origin index is using <>, then you might need to adjust your index settings. 
diff --git a/serverless/pages/ingest-your-data-upload-file.asciidoc b/serverless/pages/ingest-your-data-upload-file.asciidoc index d0b432085c..4c4bfeb24f 100644 --- a/serverless/pages/ingest-your-data-upload-file.asciidoc +++ b/serverless/pages/ingest-your-data-upload-file.asciidoc @@ -1,10 +1,10 @@ [[elasticsearch-ingest-data-file-upload]] = Upload a file -// :description: Add data to {{es}} using the File Uploader. +// :description: Add data to {es} using the File Uploader. // :keywords: serverless, elasticsearch, ingest, how to -You can upload files to {{es}} using the File Uploader. +You can upload files to {es} using the File Uploader. Use the visualizer to inspect the data before importing it. You can upload different file formats for analysis: @@ -28,7 +28,7 @@ File formats supported up to 60 MB: [[elasticsearch-ingest-data-file-upload-how-to-upload-a-file]] == How to upload a file -You'll find a link to the Data Visualizer on the {{es}} **Getting Started** page. +You'll find a link to the Data Visualizer on the {es} **Getting Started** page. [role="screenshot"] image::images/file-data-visualizer-homepage-link.png[data visualizer link] diff --git a/serverless/pages/ingest-your-data.asciidoc b/serverless/pages/ingest-your-data.asciidoc index 12d4d793ae..bd1fe042fd 100644 --- a/serverless/pages/ingest-your-data.asciidoc +++ b/serverless/pages/ingest-your-data.asciidoc @@ -10,9 +10,9 @@ The best ingest option(s) for your use case depends on whether you are indexing [[es-ingestion-overview-apis]] == Ingest data using APIs -You can use the <> to add data to your {{es}} indices, using any HTTP client, including the <>. +You can use the <> to add data to your {es} indices, using any HTTP client, including the <>. -While the {{es}} APIs can be used for any data type, Elastic provides specialized tools that optimize ingestion for specific use cases. +While the {es} APIs can be used for any data type, Elastic provides specialized tools that optimize ingestion for specific use cases. [discrete] [[es-ingestion-overview-general-content]] @@ -21,7 +21,7 @@ While the {{es}} APIs can be used for any data type, Elastic provides specialize General content is typically text-heavy data that does not have a timestamp. This could be data like knowledge bases, website content, product catalogs, and more. -You can use these specialized tools to add general content to {{es}} indices: +You can use these specialized tools to add general content to {es} indices: * <> * https://github.com/elastic/crawler[Elastic Open Web Crawler] @@ -38,7 +38,7 @@ Time series, or timestamped data, describes data that changes frequently and "fl Time series data refers to any document in standard indices or data streams that includes the `@timestamp` field. ==== -You can use these specialized tools to add timestamped data to {{es}} data streams: +You can use these specialized tools to add timestamped data to {es} data streams: * <> * <> diff --git a/serverless/pages/machine-learning.asciidoc b/serverless/pages/machine-learning.asciidoc index d8b101de08..5dba785e8a 100644 --- a/serverless/pages/machine-learning.asciidoc +++ b/serverless/pages/machine-learning.asciidoc @@ -1,12 +1,12 @@ [[machine-learning]] = {ml-cap} -// :description: View, export, and import {{ml}} jobs and models. +// :description: View, export, and import {ml} jobs and models. 
// :keywords: serverless, Elasticsearch, Observability, Security This content applies to: {es-badge} {obs-badge} {sec-badge} -To view your {{ml}} resources, go to **{project-settings} → {manage-app} → {ml-app}**: +To view your {ml} resources, go to **{project-settings} → {manage-app} → {ml-app}**: [role="screenshot"] image::images/ml-security-management.png["Anomaly detection job management"] @@ -25,23 +25,23 @@ For more information, go to {ml-docs}/ml-ad-overview.html[{anomaly-detect-cap}], [[machine-learning-synchronize-saved-objects]] == Synchronize saved objects -Before you can view your {{ml}} {{dfeeds}}, jobs, and trained models in {kib}, they must have saved objects. +Before you can view your {ml} {dfeeds}, jobs, and trained models in {kib}, they must have saved objects. For example, if you used APIs to create your jobs, wait for automatic synchronization or go to the **{ml-app}** page and click **Synchronize saved objects**. [discrete] [[machine-learning-export-and-import-jobs]] == Export and import jobs -You can export and import your {{ml}} job and {dfeed} configuration details on the **{ml-app}** page. +You can export and import your {ml} job and {dfeed} configuration details on the **{ml-app}** page. For example, you can export jobs from your test environment and import them in your production environment. -The exported file contains configuration details; it does not contain the {{ml}} models. -For {{anomaly-detect}}, you must import and run the job to build a model that is accurate for the new environment. -For {{dfanalytics}}, trained models are portable; you can import the job then transfer the model to the new cluster. -Refer to {ml-docs}/ml-trained-models.html#export-import[Exporting and importing {{dfanalytics}} trained models]. +The exported file contains configuration details; it does not contain the {ml} models. +For {anomaly-detect}, you must import and run the job to build a model that is accurate for the new environment. +For {dfanalytics}, trained models are portable; you can import the job then transfer the model to the new cluster. +Refer to {ml-docs}/ml-trained-models.html#export-import[Exporting and importing {dfanalytics} trained models]. There are some additional actions that you must take before you can successfully import and run your jobs: -* The {data-sources} that are used by {{anomaly-detect}} {{dfeeds}} and {{dfanalytics}} source indices must exist; otherwise, the import fails. +* The {data-sources} that are used by {anomaly-detect} {dfeeds} and {dfanalytics} source indices must exist; otherwise, the import fails. * If your {anomaly-jobs} use custom rules with filter lists, the filter lists must exist; otherwise, the import fails. * If your {anomaly-jobs} were associated with calendars, you must create the calendar in the new environment and add your imported jobs to the calendar. diff --git a/serverless/pages/manage-org.asciidoc b/serverless/pages/manage-org.asciidoc index bde752b8f1..48e09cfa5d 100644 --- a/serverless/pages/manage-org.asciidoc +++ b/serverless/pages/manage-org.asciidoc @@ -117,7 +117,7 @@ endif::[] |Detections admin |All available detection engine permissions to include creating rule actions, such as notifications to third-party systems. |{sec-badge} -|Endpoint policy manager |Access to endpoint policy management and related artifacts. Can manage {{fleet}} and integrations. |{sec-badge} +|Endpoint policy manager |Access to endpoint policy management and related artifacts. Can manage {fleet} and integrations. 
|{sec-badge} |=== diff --git a/serverless/pages/manage-your-project-rest-api.asciidoc b/serverless/pages/manage-your-project-rest-api.asciidoc index 72d8016530..bc6e9996ad 100644 --- a/serverless/pages/manage-your-project-rest-api.asciidoc +++ b/serverless/pages/manage-your-project-rest-api.asciidoc @@ -87,7 +87,7 @@ curl -H "Authorization: ApiKey $API_KEY" \ <2> You can <>. The response from the create project request will include the created project details, such as the project ID, -the credentials to access the project, and the endpoints to access different apps such as {{es}} and Kibana. +the credentials to access the project, and the endpoints to access different apps such as {es} and Kibana. Example of `Create project` response: diff --git a/serverless/pages/manage-your-project.asciidoc b/serverless/pages/manage-your-project.asciidoc index eed5d53f62..8d39472b22 100644 --- a/serverless/pages/manage-your-project.asciidoc +++ b/serverless/pages/manage-your-project.asciidoc @@ -20,7 +20,7 @@ Your project's performance and general data retention are controlled by the **Se //* **Rename your project**. In the **Overview** section, click **Edit** next to the project's name. //* **Manage data and integrations**. Update your project data, including storage settings, indices, and data views, directly in your project. -//* **Manage API keys**. Access your project and interact with its data programmatically using {{es}} APIs. +//* **Manage API keys**. Access your project and interact with its data programmatically using {es} APIs. //* **Manage members**. Add members and manage their access to this project or other resources of your organization. [discrete] diff --git a/serverless/pages/ml-nlp-auto-scale.asciidoc b/serverless/pages/ml-nlp-auto-scale.asciidoc index 4de6c2859b..c16f8e5b23 100644 --- a/serverless/pages/ml-nlp-auto-scale.asciidoc +++ b/serverless/pages/ml-nlp-auto-scale.asciidoc @@ -6,7 +6,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} You can enable autoscaling for each of your trained model deployments. -Autoscaling allows {{es}} to automatically adjust the resources the model deployment can use based on the workload demand. +Autoscaling allows {es} to automatically adjust the resources the model deployment can use based on the workload demand. There are two ways to enable autoscaling: @@ -68,7 +68,7 @@ Increasing the number of threads will make the search processes more performant. == Enabling autoscaling in {kib} - adaptive resources You can enable adaptive resources for your models when starting or updating the model deployment. -Adaptive resources make it possible for {{es}} to scale up or down the available resources based on the load on the process. +Adaptive resources make it possible for {es} to scale up or down the available resources based on the load on the process. This can help you to manage performance and cost more easily. When adaptive resources are enabled, the number of VCUs that the model deployment uses is set automatically based on the current load. When the load is high, the number of VCUs that the process can use is automatically increased. diff --git a/serverless/pages/pricing.asciidoc b/serverless/pages/pricing.asciidoc index 14e731096c..edd49fb767 100644 --- a/serverless/pages/pricing.asciidoc +++ b/serverless/pages/pricing.asciidoc @@ -1,5 +1,5 @@ [[elasticsearch-billing]] -= {{es}} billing dimensions += {es} billing dimensions // :description: Learn about how Elasticsearch usage affects pricing. 
// :keywords: serverless, elasticsearch, overview @@ -39,7 +39,7 @@ queries per second (QPS) you require. [discrete] [[elasticsearch-billing-managing-elasticsearch-costs]] -== Managing {{es}} costs +== Managing {es} costs You can control costs using the following strategies: diff --git a/serverless/pages/profile-queries-and-aggregations.asciidoc b/serverless/pages/profile-queries-and-aggregations.asciidoc index d367f71d1b..e1c2b98acc 100644 --- a/serverless/pages/profile-queries-and-aggregations.asciidoc +++ b/serverless/pages/profile-queries-and-aggregations.asciidoc @@ -54,7 +54,7 @@ indices and shards, it doesn't necessarily represent the actual physical query t To see more profiling information, select **View details**. You'll find details about query components and the timing breakdown of low-level methods. -For more information, refer to {ref}/search-profile.html#profiling-queries[Profiling queries] in the {{es}} documentation. +For more information, refer to {ref}/search-profile.html#profiling-queries[Profiling queries] in the {es} documentation. [discrete] [[devtools-profile-queries-and-aggregations-filter-for-an-index-or-type]] @@ -168,7 +168,7 @@ image::images/profiler-gs10.png["Drilling into the first shard's details"] + For more information about how the **{searchprofiler}** works, how timings are calculated, and how to interpret various results, refer to -{ref}/search-profile.html#profiling-queries[Profiling queries] in the {{es}} documentation. +{ref}/search-profile.html#profiling-queries[Profiling queries] in the {es} documentation. [discrete] [[profiler-render-JSON]] diff --git a/serverless/pages/project-settings-data.asciidoc b/serverless/pages/project-settings-data.asciidoc index 42b09504f8..d0906f5b97 100644 --- a/serverless/pages/project-settings-data.asciidoc +++ b/serverless/pages/project-settings-data.asciidoc @@ -43,10 +43,10 @@ To learn more about roles, refer to <>. | {es-badge}{obs-badge}{sec-badge} | <> -| View, export, and import your {{anomaly-detect}} and {{dfanalytics}} jobs and trained models. +| View, export, and import your {anomaly-detect} and {dfanalytics} jobs and trained models. | {es-badge}{obs-badge}{sec-badge} | <> -| Use transforms to pivot existing {{es}} indices into summarized or entity-centric indices. +| Use transforms to pivot existing {es} indices into summarized or entity-centric indices. | {es-badge}{obs-badge}{sec-badge} |=== diff --git a/serverless/pages/rules.asciidoc b/serverless/pages/rules.asciidoc index 09b662ab76..c602c830f9 100644 --- a/serverless/pages/rules.asciidoc +++ b/serverless/pages/rules.asciidoc @@ -30,10 +30,10 @@ The following sections describe each part of the rule in more detail. */ Each project type supports a specific set of rule types. Each _rule type_ provides its own way of defining the conditions to detect, but an expression formed by a series of clauses is a common pattern. -For example, in an {{es}} query rule, you specify an index, a query, and a threshold, which uses a metric aggregation operation (`count`, `average`, `max`, `min`, or `sum`): +For example, in an {es} query rule, you specify an index, a query, and a threshold, which uses a metric aggregation operation (`count`, `average`, `max`, `min`, or `sum`): [role="screenshot"] -image::images/es-query-rule-conditions.png[UI for defining rule conditions in an {{es}} query rule] +image::images/es-query-rule-conditions.png[UI for defining rule conditions in an {es} query rule] // NOTE: This is an autogenerated screenshot. Do not edit it directly. 
@@ -74,10 +74,10 @@ Refer to <>. After you select a connector, set the _action frequency_. If you want to reduce the number of notifications you receive without affecting their timeliness, some rule types support alert summaries. -For example, if you create an {{es}} query rule, you can set the action frequency such that you receive summaries of the new, ongoing, and recovered alerts on a custom interval: +For example, if you create an {es} query rule, you can set the action frequency such that you receive summaries of the new, ongoing, and recovered alerts on a custom interval: [role="screenshot"] -image::images/es-query-rule-action-summary.png[UI for defining rule conditions in an {{es}} query rule] +image::images/es-query-rule-action-summary.png[UI for defining rule conditions in an {es} query rule] // @@ -86,7 +86,7 @@ If the rule type does not support alert summaries, this is your only available o You must choose when the action runs (for example, at each check interval, only when the alert status changes, or at a custom action interval). You must also choose an action group, which affects whether the action runs. Each rule type has a specific set of valid action groups. -For example, you can set _Run when_ to `Query matched` or `Recovered` for the {{es}} query rule: +For example, you can set _Run when_ to `Query matched` or `Recovered` for the {es} query rule: [role="screenshot"] image::images/es-query-rule-recovery-action.png[UI for defining a recovery action] diff --git a/serverless/pages/run-api-requests-in-the-console.asciidoc b/serverless/pages/run-api-requests-in-the-console.asciidoc index 0e8ba098fd..b49d4f2934 100644 --- a/serverless/pages/run-api-requests-in-the-console.asciidoc +++ b/serverless/pages/run-api-requests-in-the-console.asciidoc @@ -22,7 +22,7 @@ You can also find Console directly on your {es-serverless} project pages, where == Write requests **Console** understands commands in a cURL-like syntax. -For example, the following is a `GET` request to the {{es}} `_search` API. +For example, the following is a `GET` request to the {es} `_search` API. [source,js] ---- diff --git a/serverless/pages/search-playground.asciidoc b/serverless/pages/search-playground.asciidoc index 2c94f97e5f..7b52449045 100644 --- a/serverless/pages/search-playground.asciidoc +++ b/serverless/pages/search-playground.asciidoc @@ -4,7 +4,7 @@ // :description: Test and edit Elasticsearch queries and chat with your data using LLMs. // :keywords: serverless, elasticsearch, search, playground, GenAI, LLMs -Use the Search Playground to test and edit {{es}} queries visually in the UI. Then use the Chat Playground to combine your {{es}} data with large language models (LLMs) for retrieval augmented generation (RAG). +Use the Search Playground to test and edit {es} queries visually in the UI. Then use the Chat Playground to combine your {es} data with large language models (LLMs) for retrieval augmented generation (RAG). You can also view the underlying Python code that powers the chat interface, and use it in your own application. Find Playground in the {es-serverless} UI under **{es} > Build > Playground**. diff --git a/serverless/pages/search-your-data-the-search-api.asciidoc b/serverless/pages/search-your-data-the-search-api.asciidoc index ac802baa15..f0a0f29cd4 100644 --- a/serverless/pages/search-your-data-the-search-api.asciidoc +++ b/serverless/pages/search-your-data-the-search-api.asciidoc @@ -13,9 +13,9 @@ queries. 
For example, a search may be limited to a specific index or only return a specific number of results. You can use the https://www.elastic.co/docs/api/doc/elasticsearch-serverless/group/endpoint-search[search API] to search and -aggregate data stored in {{es}} data streams or indices. +aggregate data stored in {es} data streams or indices. -For more information, refer to {ref}/search-your-data.html[the search API overview] in the core {{es}} docs. +For more information, refer to {ref}/search-your-data.html[the search API overview] in the core {es} docs. [discrete] [[elasticsearch-search-your-data-the-query-dsl]] @@ -33,5 +33,5 @@ Query DSL. Retrievers are an alternative to Query DSL that allow you to configure complex retrieval pipelines using a simplified syntax. Retrievers simplify the user experience by allowing entire retrieval pipelines to be configured in a single `_search` API call. -Learn more in the {ref}/retrievers-overview.html[Retrievers overview] in the core {{es}} docs. +Learn more in the {ref}/retrievers-overview.html[Retrievers overview] in the core {es} docs. diff --git a/serverless/pages/search-your-data.asciidoc b/serverless/pages/search-your-data.asciidoc index 368e3a3c55..194e88c41c 100644 --- a/serverless/pages/search-your-data.asciidoc +++ b/serverless/pages/search-your-data.asciidoc @@ -5,7 +5,7 @@ // :keywords: serverless, elasticsearch, search Searching your data in {es-serverless} works the same way as in other Elasticsearch deployments. -If you haven't used {{es}} before, you can learn the basics in the {ref}/elasticsearch-intro.html[core {{es}} documentation]. +If you haven't used {es} before, you can learn the basics in the {ref}/elasticsearch-intro.html[core {es} documentation]. You can use the https://www.elastic.co/docs/api/doc/elasticsearch-serverless[{es-serverless} REST APIs] to search your data using any HTTP client, including the <>, or directly in <>. @@ -13,14 +13,14 @@ You can also run searches using {kibana-ref}/discover.html[Discover] in your pro [TIP] ==== -Try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {{es}} documentation to get started, or check out our https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebooks]. +Try our hands-on {ref}/quickstart.html#quickstart-list[quick start tutorials] in the core {es} documentation to get started, or check out our https://github.com/elastic/elasticsearch-labs/tree/main/notebooks#readme[Python notebooks]. ==== [discrete] [[elasticsearch-search-your-data-query-languages-overview]] == Query languages -Learn about the various query languages you can use to search your data in the {ref}/search-analyze.html[core {{es}} documentation]. +Learn about the various query languages you can use to search your data in the {ref}/search-analyze.html[core {es} documentation]. [discrete] [[elasticsearch-search-your-data-learn-more]] diff --git a/serverless/pages/serverless-differences.asciidoc b/serverless/pages/serverless-differences.asciidoc index cc8a819985..c20bd36fdc 100644 --- a/serverless/pages/serverless-differences.asciidoc +++ b/serverless/pages/serverless-differences.asciidoc @@ -1,5 +1,5 @@ [[elasticsearch-differences]] -= Differences from other {{es}} offerings += Differences from other {es} offerings ++++ Serverless differences ++++ @@ -7,11 +7,11 @@ // :description: Understand how {es-serverless} differs from Elastic Cloud Hosted and self-managed offerings. 
// :keywords: serverless, elasticsearch -<> handles all the infrastructure management for you, providing a fully managed {{es}} service. +<> handles all the infrastructure management for you, providing a fully managed {es} service. -If you've used {{es}} before, you'll notice some differences in how you work with the service on {serverless-full}, because a number of APIs and settings are not required for serverless projects. +If you've used {es} before, you'll notice some differences in how you work with the service on {serverless-full}, because a number of APIs and settings are not required for serverless projects. -This guide helps you understand what's different, what's available, and how to work effectively when running {{es}} on {serverless-full}. +This guide helps you understand what's different, what's available, and how to work effectively when running {es} on {serverless-full}. [discrete] [[elasticsearch-differences-serverless-infrastructure-management]] @@ -24,7 +24,7 @@ This guide helps you understand what's different, what's available, and how to w * Shard distribution and replication * Resource utilization and monitoring -This fully managed approach means many traditional {{es}} infrastructure APIs and settings are not available to end users, as detailed in the following sections. +This fully managed approach means many traditional {es} infrastructure APIs and settings are not available to end users, as detailed in the following sections. [discrete] [[elasticsearch-differences-serverless-index-size]] @@ -50,7 +50,7 @@ To ensure optimal performance, follow these recommendations for sizing individua For large datasets that exceed the recommended maximum size for a single index, consider splitting your data across smaller indices and using an alias to search them collectively. -These recommendations do not apply to indices using better binary quantization (BBQ). Refer to {ref}/dense-vector.html#dense-vector-quantization[vector quantization] in the core {{es}} docs for more information. +These recommendations do not apply to indices using better binary quantization (BBQ). Refer to {ref}/dense-vector.html#dense-vector-quantization[vector quantization] in the core {es} docs for more information. [discrete] [[elasticsearch-differences-serverless-apis-availability]] @@ -105,7 +105,7 @@ In {es-serverless}, you can only configure {ref}/index-modules.html#index-module Cluster-level settings and node-level settings are not required by end users and the `elasticsearch.yml` file is fully managed by Elastic. Available settings:: -*Index-level settings*: Settings that control how {{es}} documents are processed, stored, and searched are available to end users. These include: +*Index-level settings*: Settings that control how {es} documents are processed, stored, and searched are available to end users. 
These include: * Analysis configuration * Mapping parameters * Search/query settings diff --git a/serverless/pages/sign-up.asciidoc b/serverless/pages/sign-up.asciidoc index f2852cd0c3..d4dfcad875 100644 --- a/serverless/pages/sign-up.asciidoc +++ b/serverless/pages/sign-up.asciidoc @@ -44,7 +44,7 @@ During the free 14 day trial, Elastic provides access to one hosted deployment a * You can have one active deployment at a time * The deployment size is limited to 8GB RAM and approximately 360GB of storage, depending on the specified hardware profile * Machine learning nodes are available up to 4GB RAM -* Custom {{es}} plugins are not enabled +* Custom {es} plugins are not enabled To learn more about Elastic Cloud Hosted, check our https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elasticsearch Service documentation]. @@ -59,7 +59,7 @@ To learn more about Elastic Cloud Hosted, check our https://www.elastic.co/guide Subscribe to https://www.elastic.co/guide/en/cloud/current/ec-billing-details.html[Elastic Cloud] for the following benefits: -* Increased memory or storage for deployment components, such as {{es}} clusters, machine learning nodes, and APM server. +* Increased memory or storage for deployment components, such as {es} clusters, machine learning nodes, and APM server. * As many deployments and projects as you need. * Third availability zone for your deployments. * Access to additional features, such as cross-cluster search and cross-cluster replication. diff --git a/serverless/pages/transforms.asciidoc b/serverless/pages/transforms.asciidoc index e6505a8822..c561356f10 100644 --- a/serverless/pages/transforms.asciidoc +++ b/serverless/pages/transforms.asciidoc @@ -6,7 +6,7 @@ This content applies to: {es-badge} {obs-badge} {sec-badge} -{transforms-cap} enable you to convert existing {{es}} indices into summarized +{transforms-cap} enable you to convert existing {es} indices into summarized indices, which provide opportunities for new insights and analytics. For example, you can use {transforms} to pivot your data into entity-centric diff --git a/serverless/pages/welcome-to-serverless.asciidoc b/serverless/pages/welcome-to-serverless.asciidoc index c192c5cb59..5cce461ebf 100644 --- a/serverless/pages/welcome-to-serverless.asciidoc +++ b/serverless/pages/welcome-to-serverless.asciidoc @@ -1,8 +1,8 @@ -{serverless-full} is a fully managed solution that allows you to deploy and use Elastic for your use cases without managing the underlying infrastructure. It represents a shift in how you interact with {{es}} - instead of managing clusters, nodes, data tiers, and scaling, you create **serverless projects** that are fully managed and automatically scaled by Elastic. This abstraction of infrastructure decisions allows you to focus solely on gaining value and insight from your data. +{serverless-full} is a fully managed solution that allows you to deploy and use Elastic for your use cases without managing the underlying infrastructure. It represents a shift in how you interact with {es} - instead of managing clusters, nodes, data tiers, and scaling, you create **serverless projects** that are fully managed and automatically scaled by Elastic. This abstraction of infrastructure decisions allows you to focus solely on gaining value and insight from your data. -{serverless-full} automatically provisions, manages, and scales your {{es}} resources based on your actual usage. 
Unlike traditional deployments where you need to predict and provision resources in advance, serverless adapts to your workload in real-time, ensuring optimal performance while eliminating the need for manual capacity planning. +{serverless-full} automatically provisions, manages, and scales your {es} resources based on your actual usage. Unlike traditional deployments where you need to predict and provision resources in advance, serverless adapts to your workload in real-time, ensuring optimal performance while eliminating the need for manual capacity planning. -Serverless projects use the core components of the {stack}, such as {{es}} and {kib}, and are based on an architecture that +Serverless projects use the core components of the {stack}, such as {es} and {kib}, and are based on an architecture that decouples compute and storage. Search and indexing operations are separated, which offers high flexibility for scaling your workloads while ensuring a high level of performance. diff --git a/serverless/pages/what-is-elasticsearch-serverless.asciidoc b/serverless/pages/what-is-elasticsearch-serverless.asciidoc index 0c09539847..875015e4fb 100644 --- a/serverless/pages/what-is-elasticsearch-serverless.asciidoc +++ b/serverless/pages/what-is-elasticsearch-serverless.asciidoc @@ -9,14 +9,14 @@ .Understanding Elasticsearch on serverless [IMPORTANT] ==== -If you haven't used {{es}} before, first learn the basics in the https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html[core {{es}} documentation]. +If you haven't used {es} before, first learn the basics in the https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html[core {es} documentation]. ==== {es-serverless} is one of the three available project types on <>. This project type enables you to use the core functionality of {es}: searching, indexing, storing, and analyzing data of all shapes and sizes. -When using {{es}} on {serverless-full} you don’t need to worry about managing the infrastructure that keeps {{es}} distributed and available: nodes, shards, and replicas. These resources are completely automated on the serverless platform, which is designed to scale up and down with your workload. +When using {es} on {serverless-full} you don’t need to worry about managing the infrastructure that keeps {es} distributed and available: nodes, shards, and replicas. These resources are completely automated on the serverless platform, which is designed to scale up and down with your workload. This automation allows you to focus on building your search applications and solutions. @@ -28,19 +28,19 @@ This automation allows you to focus on building your search applications and sol |=== | 🚀 a| [.card-title]#<># + -Get started by creating your first {{es}} project on serverless. +Get started by creating your first {es} project on serverless. | 🔌 -a| [.card-title]#<># + +a| [.card-title]#<># + Learn how to connect your applications to your {es-serverless} endpoint. | ⤵️ a| [.card-title]#<># + -Learn how to get your data into {{es}} and start building your search application. +Learn how to get your data into {es} and start building your search application. | 🛝 a| [.card-title]#{kibana-ref}/playground.html[*Try Playground →*]# + -After you've added some data, use Playground to test out queries and combine {{es}} with the power of Generative AI in your applications. 
+After you've added some data, use Playground to test out queries and combine {es} with the power of Generative AI in your applications. |=== [discrete] @@ -51,9 +51,9 @@ After you've added some data, use Playground to test out queries and combine {{e |=== | ❓ a| [.card-title]#<># + -Understand the differences between {{es}} on {serverless-full} and other deployment types. +Understand the differences between {es} on {serverless-full} and other deployment types. | 🧾 a| [.card-title]#<># + -Learn about the billing model for {{es}} on {serverless-full}. +Learn about the billing model for {es} on {serverless-full}. |=== From 430a6aaf0ef9b3de8f4bd0f365f72ca9ddbab261 Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Feb 2025 14:37:08 -0500 Subject: [PATCH 3/6] oops --- serverless/partials/field-mappings-dense-vector.asciidoc | 2 +- serverless/partials/field-mappings-elser.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/serverless/partials/field-mappings-dense-vector.asciidoc b/serverless/partials/field-mappings-dense-vector.asciidoc index 42bded676b..3c1c7e5635 100644 --- a/serverless/partials/field-mappings-dense-vector.asciidoc +++ b/serverless/partials/field-mappings-dense-vector.asciidoc @@ -1,4 +1,4 @@ -The models compatible with {{es}} NLP generate dense vectors as output. The +The models compatible with {es} NLP generate dense vectors as output. The {ref}/dense-vector.html[`dense_vector`] field type is suitable for storing dense vectors of numeric values. The index must have a field with the `dense_vector` field type to index the embeddings that the supported third-party model that you diff --git a/serverless/partials/field-mappings-elser.asciidoc b/serverless/partials/field-mappings-elser.asciidoc index de47021333..e633b80b7c 100644 --- a/serverless/partials/field-mappings-elser.asciidoc +++ b/serverless/partials/field-mappings-elser.asciidoc @@ -1,5 +1,5 @@ ELSER produces token-weight pairs as output from the input text and the query. -The {{es}} {ref}/sparse-vector.html[`sparse_vector`] field type can store these +The {es} {ref}/sparse-vector.html[`sparse_vector`] field type can store these token-weight pairs as numeric feature vectors. The index must have a field with the `sparse_vector` field type to index the tokens that ELSER generates. From ed26acc4ee09a9d9b62af2f3ed5c8fa8b025f3f5 Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Thu, 13 Feb 2025 14:42:10 -0500 Subject: [PATCH 4/6] Update explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md --- .../data-frame-analytics/ml-feature-importance.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md index c1cf80a5d1..742c0387e9 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md @@ -29,7 +29,7 @@ If the {{classanalysis}} involves more than two classes, {{kib}} uses colors to You can also examine the feature importance values for each individual prediction. In {{kib}}, you can see these values in JSON objects or decision plots. For {{reganalysis}}, each decision plot starts at a shared baseline, which is the average of the prediction values for all the data points in the training data set. 
When you add all of the feature importance values for a particular data point to that baseline, you arrive at the numeric prediction value. If a {{feat-imp}} value is negative, it reduces the prediction value. If a {{feat-imp}} value is positive, it increases the prediction value. For example: :::{image} ../../../images/machine-learning-flights-regression-decision-plot.png -:alt: Feature importance values for a {{regression}} {{dfanalytics-job}} in {kib} +:alt: Feature importance values for a {{regression}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: From 162ebf884cc40b1429edade91d95fc3ae5d584fe Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Feb 2025 14:44:48 -0500 Subject: [PATCH 5/6] more --- .../install-elasticsearch-with-docker.md | 2 +- .../deploy/self-managed/install-with-docker.md | 2 +- .../ccr-getting-started-auto-follow.md | 2 +- .../ccr-getting-started-follower-index.md | 2 +- .../alerts-cases/alerts/maintenance-windows.md | 2 +- .../alerts/rule-type-index-threshold.md | 2 +- .../anomaly-detection/geographic-anomalies.md | 4 ++-- .../anomaly-detection/mapping-anomalies.md | 2 +- .../anomaly-detection/ml-ad-explain.md | 4 ++-- .../anomaly-detection/ml-ad-view-results.md | 2 +- .../ml-configuring-transform.md | 2 +- .../anomaly-detection/ml-configuring-url.md | 2 +- .../ml-dfa-classification.md | 16 ++++++++-------- .../data-frame-analytics/ml-dfa-custom-urls.md | 2 +- .../ml-dfa-finding-outliers.md | 8 ++++---- .../data-frame-analytics/ml-dfa-regression.md | 16 ++++++++-------- .../ml-feature-importance.md | 4 ++-- .../data-frame-analytics/ml-trained-models.md | 2 +- .../machine-learning-in-kibana.md | 2 +- .../xpack-ml-dfanalytics.md | 2 +- .../setting-up-machine-learning.md | 2 +- .../transforms/ecommerce-transforms.md | 12 ++++++------ explore-analyze/transforms/transform-examples.md | 6 +++--- explore-analyze/transforms/transform-overview.md | 4 ++-- .../visualize/custom-visualizations-with-vega.md | 2 +- .../elasticsearch-reference/saml-guide-stack.md | 2 +- .../kibana/kibana/connect-to-elasticsearch.md | 2 +- .../observability-docs/observability/apm.md | 2 +- .../observability/logs-metrics-get-started.md | 2 +- .../observability/monitor-uptime-synthetics.md | 2 +- .../observability/synthetics-analyze.md | 2 +- .../observability/synthetics-params-secrets.md | 2 +- .../observability/synthetics-settings.md | 10 +++++----- .../security-docs/security/rules-ui-create.md | 2 +- .../elastic-stack/installing-stack-demo-self.md | 2 +- .../apps/uptime-monitoring-deprecated.md | 2 +- solutions/observability/cicd.md | 6 +++--- ...web-services-aws-with-amazon-data-firehose.md | 2 +- ...ices-aws-with-elastic-serverless-forwarder.md | 2 +- .../cloud/monitor-aws-network-firewall-logs.md | 2 +- .../cloud/monitor-cloudtrail-logs.md | 4 ++-- .../cloud/monitor-cloudwatch-logs.md | 4 ++-- .../monitor-web-application-firewall-waf-logs.md | 2 +- .../monitor-aws-with-amazon-data-firehose.md | 2 +- 44 files changed, 80 insertions(+), 80 deletions(-) diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 118c7f4d74..055030956a 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -206,7 +206,7 @@ docker network rm elastic docker rm es01 docker rm es02 -# Remove the {kib} container +# Remove the {{kib}} container docker rm kib01 ``` diff --git 
a/deploy-manage/deploy/self-managed/install-with-docker.md b/deploy-manage/deploy/self-managed/install-with-docker.md index 9ddae78037..628f8357e2 100644 --- a/deploy-manage/deploy/self-managed/install-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-with-docker.md @@ -154,7 +154,7 @@ docker network rm elastic # Remove the {{es}} container docker rm es01 -# Remove the {kib} container +# Remove the {{kib}} container docker rm kib01 ``` diff --git a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-auto-follow.md b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-auto-follow.md index c935d3870d..7bc674cc10 100644 --- a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-auto-follow.md +++ b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-auto-follow.md @@ -20,7 +20,7 @@ To create an auto-follow pattern from Stack Management in {{kib}}: As new indices matching these patterns are created on the remote, {{es}} automatically replicates them to local follower indices. :::{image} ../../../images/elasticsearch-reference-auto-follow-patterns.png -:alt: The Auto-follow patterns page in {kib} +:alt: The Auto-follow patterns page in {{kib}} :class: screenshot ::: diff --git a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-follower-index.md b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-follower-index.md index fa7e41c29f..2ff419fa31 100644 --- a/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-follower-index.md +++ b/deploy-manage/tools/cross-cluster-replication/ccr-getting-started-follower-index.md @@ -19,7 +19,7 @@ To create a follower index from Stack Management in {{kib}}: When you index documents into your leader index, {{es}} replicates the documents in the follower index. :::{image} ../../../images/elasticsearch-reference-ccr-follower-index.png -:alt: The Cross-Cluster Replication page in {kib} +:alt: The Cross-Cluster Replication page in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/alerts-cases/alerts/maintenance-windows.md b/explore-analyze/alerts-cases/alerts/maintenance-windows.md index 2ce2202257..c0d8ee8f11 100644 --- a/explore-analyze/alerts-cases/alerts/maintenance-windows.md +++ b/explore-analyze/alerts-cases/alerts/maintenance-windows.md @@ -41,7 +41,7 @@ In **Management > {{stack-manage-app}} > Maintenance Windows** or **{{project-se When you create a maintenance window, you must provide a name and a schedule. You can optionally configure it to repeat daily, monthly, yearly, or on a custom interval. :::{image} ../../../images/kibana-create-maintenance-window.png -:alt: The Create Maintenance Window user interface in {kib} +:alt: The Create Maintenance Window user interface in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md b/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md index e3dc363b20..a21835e48a 100644 --- a/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md +++ b/explore-analyze/alerts-cases/alerts/rule-type-index-threshold.md @@ -18,7 +18,7 @@ In **{{stack-manage-app}}** > **{{rules-ui}}**, click **Create rule**. Select th When you create an index threshold rule, you must define the conditions for the rule to detect. 
For example: :::{image} ../../../images/kibana-rule-types-index-threshold-conditions.png -:alt: Defining index threshold rule conditions in {kib} +:alt: Defining index threshold rule conditions in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md index 8553bf12be..32dd390ec8 100644 --- a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md +++ b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md @@ -46,7 +46,7 @@ To create an {{anomaly-job}} that uses the `lat_long` function, in {{kib}} you m For example, create a job that analyzes the sample eCommerce orders data set to find orders with unusual coordinates (`geoip.location` values) relative to the past behavior of each customer (`user` ID): :::{image} ../../../images/machine-learning-ecommerce-advanced-wizard-geopoint.jpg -:alt: A screenshot of creating an {{anomaly-job}} using the eCommerce data in {kib} +:alt: A screenshot of creating an {{anomaly-job}} using the eCommerce data in {{kib}} :class: screenshot ::: @@ -107,7 +107,7 @@ POST _ml/datafeeds/datafeed-ecommerce-geo/_start <4> Alternatively, create a job that analyzes the sample web logs data set to detect events with unusual coordinates (`geo.coordinates` values) or unusually high sums of transferred data (`bytes` values): :::{image} ../../../images/machine-learning-weblogs-advanced-wizard-geopoint.jpg -:alt: A screenshot of creating an {{anomaly-job}} using the web logs data in {kib} +:alt: A screenshot of creating an {{anomaly-job}} using the web logs data in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md b/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md index d9710a00ef..841a3b0746 100644 --- a/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md +++ b/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md @@ -32,7 +32,7 @@ To create an {{anomaly-job}} in {{kib}}, click **Create job** on the **{{ml-cap} For example, use the multi-metric job wizard to create a job that analyzes the sample web logs data set to detect anomalous behavior in the sum of the data transferred (`bytes` values) for each destination country (`geo.dest` values): :::{image} ../../../images/machine-learning-weblogs-multimetric-wizard-vector.png -:alt: A screenshot of creating an {{anomaly-job}} using the web logs data in {kib} +:alt: A screenshot of creating an {{anomaly-job}} using the web logs data in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-ad-explain.md b/explore-analyze/machine-learning/anomaly-detection/ml-ad-explain.md index cb87b497bc..b09e687c3a 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-ad-explain.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-ad-explain.md @@ -41,7 +41,7 @@ Anomaly scores are in the range of 0 and 100. The values close to 100 signify th The process when the anomaly detection algorithm adjusts the anomaly scores of past records when new data comes in is called *renormalization*. The `renormalization_window_days` configuration parameter specifies the time interval for this adjustment. The **Single Metric Viewer** in Kibana highlights the renormalization change. 
:::{image} ../../../images/machine-learning-renormalization-score-reduction.jpg -:alt: Example of a record score reduction in {kib} +:alt: Example of a record score reduction in {{kib}} :class: screenshot ::: @@ -54,7 +54,7 @@ Two more factors may lead to a reduction of the initial score: a high variance i Real-world anomalies often show the impacts of several factors. The **Anomaly explanation** section in the Single Metric Viewer can help you interpret an anomaly in its context. :::{image} ../../../images/machine-learning-detailed-single-metric.jpg -:alt: Detailed view of the Single Metric Viewer in {kib} +:alt: Detailed view of the Single Metric Viewer in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-ad-view-results.md b/explore-analyze/machine-learning/anomaly-detection/ml-ad-view-results.md index 28329242ae..9cfb4db7c8 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-ad-view-results.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-ad-view-results.md @@ -24,7 +24,7 @@ When you view your {{ml}} results, each bucket has an anomaly score. This score The {{ml}} analytics enhance the anomaly score for each bucket by considering contiguous buckets. This extra *multi-bucket analysis* effectively uses a sliding window to evaluate the events in each bucket relative to the larger context of recent events. When you review your {{ml}} results, there is a `multi_bucket_impact` property that indicates how strongly the final anomaly score is influenced by multi-bucket analysis. In {{kib}}, anomalies with medium or high multi-bucket impact are depicted in the **Anomaly Explorer** and the **Single Metric Viewer** with a cross symbol instead of a dot. For example: :::{image} ../../../images/machine-learning-multibucketanalysis.jpg -:alt: Examples of anomalies with multi-bucket impact in {kib} +:alt: Examples of anomalies with multi-bucket impact in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md index c13b19a4f8..fa28dfcc9b 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-transform.md @@ -140,7 +140,7 @@ This example demonstrates how to use runtime fields, but it contains insufficien You can alternatively use {{kib}} to create an advanced {{anomaly-job}} that uses runtime fields. To add the `runtime_mappings` property to your {{dfeed}}, you must use the **Edit JSON** tab. For example: :::{image} ../../../images/machine-learning-ml-runtimefields.jpg -:alt: Using runtime_mappings in {{dfeed}} config via {kib} +:alt: Using runtime_mappings in {{dfeed}} config via {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md index e598f4cd09..1f24b8b924 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-configuring-url.md @@ -18,7 +18,7 @@ You can optionally attach one or more custom URLs to your {{anomaly-jobs}}. Thes When you create or edit an {{anomaly-job}} in {{kib}}, it simplifies the creation of the custom URLs for {{kib}} dashboards and the **Discover** app and it enables you to test your URLs. 
For example: :::{image} ../../../images/machine-learning-ml-customurl-edit.gif -:alt: Add a custom URL in {kib} +:alt: Add a custom URL in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md index 71306c3d04..5a1d33f533 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-classification.md @@ -161,7 +161,7 @@ The model that you created is stored as {{es}} documents in internal indices. In 1. To deploy {{dfanalytics}} model in a pipeline, navigate to **Machine Learning** > **Model Management** > **Trained models** in the main menu, or use the [global search field](../../find-and-organize/find-apps-and-objects.md) in {{kib}}. 2. Find the model you want to deploy in the list and click **Deploy model** in the **Actions** menu. :::{image} ../../../images/machine-learning-ml-dfa-trained-models-ui.png - :alt: The trained models UI in {kib} + :alt: The trained models UI in {{kib}} :class: screenshot ::: @@ -281,7 +281,7 @@ To predict whether a specific flight is delayed: 1. Create a {{dfanalytics-job}}. You can use the wizard on the **{{ml-app}}** > **Data Frame Analytics** tab in {{kib}} or the [create {{dfanalytics-jobs}}](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics) API. :::{image} ../../../images/machine-learning-flights-classification-job-1.jpg - :alt: Creating a {{dfanalytics-job}} in {kib} + :alt: Creating a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: @@ -291,7 +291,7 @@ To predict whether a specific flight is delayed: 4. Add `Cancelled`, `FlightDelayMin`, and `FlightDelayType` to the list of excluded fields. It is recommended to exclude fields that either contain erroneous data or describe the `dependent_variable`. The wizard includes a scatterplot matrix, which enables you to explore the relationships between the numeric fields. The color of each point is affected by the value of the {{depvar}} for that document, as shown in the legend. You can highlight an area in one of the charts and the corresponding area is also highlighted in the rest of the charts. You can use this matrix to help you decide which fields to include or exclude. :::{image} ../../../images/machine-learning-flights-classification-scatterplot.png - :alt: A scatterplot matrix for three fields in {kib} + :alt: A scatterplot matrix for three fields in {{kib}} :class: screenshot ::: If you want these charts to represent data from a larger sample size or from a randomized selection of documents, you can change the default behavior. However, a larger sample size might slow down the performance of the matrix and a randomized selection might put more load on the cluster due to the more intensive query. @@ -356,7 +356,7 @@ POST _ml/data_frame/analytics/model-flight-delays-classification/_start 3. Check the job stats to follow the progress in {{kib}} or use the [get {{dfanalytics-jobs}} statistics API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats). 
:::{image} ../../../images/machine-learning-flights-classification-details.jpg -:alt: Statistics for a {{dfanalytics-job}} in {kib} +:alt: Statistics for a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: @@ -465,7 +465,7 @@ Now you have a new index that contains a copy of your source data with predictio When you view the {{classification}} results in {{kib}}, it shows the contents of the destination index in a tabular format. It also provides information about the analysis details, model evaluation metrics, total {{feat-imp}} values, and a scatterplot matrix. :::{image} ../../../images/machine-learning-flights-classification-results.jpg -:alt: Destination index table for a classification job in {kib} +:alt: Destination index table for a classification job in {{kib}} :class: screenshot ::: @@ -517,14 +517,14 @@ The class with the highest score is the prediction. In this example, `false` has If you chose to calculate {{feat-imp}}, the destination index also contains `ml.feature_importance` objects. Every field that is included in the analysis (known as a *feature* of the data point) is assigned a {{feat-imp}} value. It has both a magnitude and a direction (positive or negative), which indicates how each field affects a particular prediction. Only the most significant values (in this case, the top 10) are stored in the index. However, the trained model metadata also contains the average magnitude of the {{feat-imp}} values for each field across all the training data. You can view this summarized information in {{kib}}: :::{image} ../../../images/machine-learning-flights-classification-total-importance.jpg -:alt: Total {{feat-imp}} values in {kib} +:alt: Total {{feat-imp}} values in {{kib}} :class: screenshot ::: You can also see the {{feat-imp}} values for each individual prediction in the form of a decision plot: :::{image} ../../../images/machine-learning-flights-classification-importance.png -:alt: A decision plot for {{feat-imp}} values in {kib} +:alt: A decision plot for {{feat-imp}} values in {{kib}} :class: screenshot ::: @@ -672,7 +672,7 @@ Though you can look at individual results and compare the predicted value (`ml.F {{kib}} provides a *normalized confusion matrix* that contains the percentage of occurrences where the analysis classified data points correctly with their actual class and the percentage of occurrences where it misclassified them. :::{image} ../../../images/machine-learning-flights-classification-evaluation.png -:alt: Evaluation of a classification job in {kib} +:alt: Evaluation of a classification job in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md index 0e278dea03..8b23fa5dc7 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-custom-urls.md @@ -18,7 +18,7 @@ You can optionally attach one or more custom URLs to your {{dfanalytics-jobs}}. When you create or edit an {{dfanalytics-job}} in {{kib}}, it simplifies the creation of the custom URLs for {{kib}} dashboards and the **Discover** app and it enables you to test your URLs. 
For example: :::{image} ../../../images/machine-learning-ml-dfa-custom-url-edit.png -:alt: Add a custom URL in {kib} +:alt: Add a custom URL in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-finding-outliers.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-finding-outliers.md index e0f0483391..34a7b95001 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-finding-outliers.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-finding-outliers.md @@ -121,7 +121,7 @@ The goal of {{oldetection}} is to find the most unusual documents in an index. L In particular, create a {{transform}} that calculates the number of occasions when a specific client IP communicated with the network (`@timestamp.value_count`), the sum of the bytes that are exchanged between the network and the client’s machine (`bytes.sum`), the maximum exchanged bytes during a single occasion (`bytes.max`), and the total number of requests (`request.value_count`) initiated by a specific client IP. You can preview the {{transform}} before you create it in **{{stack-manage-app}}** > **Transforms**: :::{image} ../../../images/machine-learning-logs-transform-preview.jpg - :alt: Creating a {{transform}} in {kib} + :alt: Creating a {{transform}} in {{kib}} :class: screenshot ::: Alternatively, you can use the [preview {{transform}} API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform) and the [create {{transform}} API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform). @@ -238,13 +238,13 @@ POST _transform/logs-by-clientip/_start In the wizard on the **Machine Learning** > **Data Frame Analytics** page in {{kib}}, select your new {{data-source}} then use the default values for {{oldetection}}. For example: :::{image} ../../../images/machine-learning-weblog-outlier-job-1.jpg - :alt: Create a {{dfanalytics-job}} in {kib} + :alt: Create a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: The wizard includes a scatterplot matrix, which enables you to explore the relationships between the fields. You can use that information to help you decide which fields to include or exclude from the analysis. :::{image} ../../../images/machine-learning-weblog-outlier-scatterplot.jpg - :alt: A scatterplot matrix for three fields in {kib} + :alt: A scatterplot matrix for three fields in {{kib}} :class: screenshot ::: @@ -292,7 +292,7 @@ PUT _ml/data_frame/analytics/weblog-outliers The {{dfanalytics}} job creates an index that contains the original data and {{olscores}} for each document. The {{olscore}} indicates how different each entity is from other entities. In {{kib}}, you can view the results from the {{dfanalytics}} job and sort them on the outlier score: :::{image} ../../../images/machine-learning-outliers.jpg - :alt: View {{oldetection}} results in {kib} + :alt: View {{oldetection}} results in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md index bd1f1e1584..320336d6a7 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-dfa-regression.md @@ -107,7 +107,7 @@ The model that you created is stored as {{es}} documents in internal indices. In 1. 
To deploy {{dfanalytics}} model in a pipeline, navigate to **Machine Learning** > **Model Management** > **Trained models** in the main menu, or use the [global search field](../../find-and-organize/find-apps-and-objects.md) in {{kib}}. 2. Find the model you want to deploy in the list and click **Deploy model** in the **Actions** menu. :::{image} ../../../images/machine-learning-ml-dfa-trained-models-ui.png - :alt: The trained models UI in {kib} + :alt: The trained models UI in {{kib}} :class: screenshot ::: @@ -224,7 +224,7 @@ To predict the number of minutes delayed for each flight: 2. Create a {{dfanalytics-job}}. You can use the wizard on the **{{ml-app}}** > **Data Frame Analytics** tab in {{kib}} or the [create {{dfanalytics-jobs}}](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics) API. :::{image} ../../../images/machine-learning-flights-regression-job-1.jpg - :alt: Creating a {{dfanalytics-job}} in {kib} + :alt: Creating a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: 1. Choose `kibana_sample_data_flights` as the source index. @@ -234,7 +234,7 @@ To predict the number of minutes delayed for each flight: 5. Add `Cancelled`, `FlightDelay`, and `FlightDelayType` to the list of excluded fields. These fields will be excluded from the analysis. It is recommended to exclude fields that either contain erroneous data or describe the `dependent_variable`. The wizard includes a scatterplot matrix, which enables you to explore the relationships between the numeric fields. The color of each point is affected by the value of the {{depvar}} for that document, as shown in the legend. You can highlight an area in one of the charts and the corresponding area is also highlighted in the rest of the chart. You can use this matrix to help you decide which fields to include or exclude from the analysis. :::{image} ../../../images/machine-learning-flightdata-regression-scatterplot.png - :alt: A scatterplot matrix for three fields in {kib} + :alt: A scatterplot matrix for three fields in {{kib}} :class: screenshot ::: If you want these charts to represent data from a larger sample size or from a randomized selection of documents, you can change the default behavior. However, a larger sample size might slow down the performance of the matrix and a randomized selection might put more load on the cluster due to the more intensive query. @@ -304,7 +304,7 @@ POST _ml/data_frame/analytics/model-flight-delays-regression/_start 4. Check the job stats to follow the progress in {{kib}} or use the [get {{dfanalytics-jobs}} statistics API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats). :::{image} ../../../images/machine-learning-flights-regression-details.jpg -:alt: Statistics for a {{dfanalytics-job}} in {kib} +:alt: Statistics for a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: @@ -412,7 +412,7 @@ Now you have a new index that contains a copy of your source data with predictio When you view the results in {{kib}}, it shows the contents of the destination index in a tabular format. It also provides information about the analysis details, model evaluation metrics, total feature importance values, and a scatterplot matrix. 
Let’s start by looking at the results table: :::{image} ../../../images/machine-learning-flights-regression-results.jpg -:alt: Results for a {{dfanalytics-job}} in {kib} +:alt: Results for a {{dfanalytics-job}} in {{kib}} :class: screenshot ::: @@ -421,14 +421,14 @@ In this example, the table shows a column for the {{depvar}} (`FlightDelayMin`), If you chose to calculate {{feat-imp}}, the destination index also contains `ml.feature_importance` objects. Every field that is included in the {{reganalysis}} (known as a *feature* of the data point) is assigned a {{feat-imp}} value. This value has both a magnitude and a direction (positive or negative), which indicates how each field affects a particular prediction. Only the most significant values (in this case, the top 5) are stored in the index. However, the trained model metadata also contains the average magnitude of the {{feat-imp}} values for each field across all the training data. You can view this summarized information in {{kib}}: :::{image} ../../../images/machine-learning-flights-regression-total-importance.jpg -:alt: Total {{feat-imp}} values in {kib} +:alt: Total {{feat-imp}} values in {{kib}} :class: screenshot ::: You can also see the {{feat-imp}} values for each individual prediction in the form of a decision plot: :::{image} ../../../images/machine-learning-flights-regression-importance.png -:alt: A decision plot for {{feat-imp}} values in {kib} +:alt: A decision plot for {{feat-imp}} values in {{kib}} :class: screenshot ::: @@ -534,7 +534,7 @@ Though you can look at individual results and compare the predicted value (`ml.F {{kib}} provides *training error* metrics, which represent how well the model performed on the training data set. It also provides *generalization error* metrics, which represent how well the model performed on testing data. :::{image} ../../../images/machine-learning-flights-regression-evaluation.jpg -:alt: Evaluating {{reganalysis}} results in {kib} +:alt: Evaluating {{reganalysis}} results in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md index 742c0387e9..1cc37caba1 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-feature-importance.md @@ -15,14 +15,14 @@ The purpose of {{feat-imp}} is to help you determine whether the predictions are You can see the average magnitude of the {{feat-imp}} values for each field across all the training data in {{kib}} or by using the [get trained model API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models). For example, {{kib}} shows the total feature importance for each field in {{regression}} or binary {{classanalysis}} results as follows: :::{image} ../../../images/machine-learning-flights-regression-total-importance.jpg -:alt: Total {{feat-imp}} values for a {{regression}} {{dfanalytics-job}} in {kib} +:alt: Total {{feat-imp}} values for a {{regression}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: If the {{classanalysis}} involves more than two classes, {{kib}} uses colors to show how the impact of each field varies by class. 
For example: :::{image} ../../../images/machine-learning-diamonds-classification-total-importance.png -:alt: Total {{feat-imp}} values for a {{classification}} {{dfanalytics-job}} in {kib} +:alt: Total {{feat-imp}} values for a {{classification}} {{dfanalytics-job}} in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md index 99b248c66f..fbc7fa983b 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md @@ -23,7 +23,7 @@ Alternatively, you can use APIs like [get trained models](https://www.elastic.co 2. Find the model you want to deploy in the list and click **Deploy model** in the **Actions** menu. :::{image} ../../../images/machine-learning-ml-dfa-trained-models-ui.png -:alt: The trained models UI in {kib} +:alt: The trained models UI in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/machine-learning-in-kibana.md b/explore-analyze/machine-learning/machine-learning-in-kibana.md index a13b6cc00b..5525d86f97 100644 --- a/explore-analyze/machine-learning/machine-learning-in-kibana.md +++ b/explore-analyze/machine-learning/machine-learning-in-kibana.md @@ -52,7 +52,7 @@ This functionality is in technical preview and may be changed or removed in a fu You can find the data drift view in **{{ml-app}}** > **{{data-viz}}** in {{kib}} or by using the [global search field](../../explore-analyze/find-and-organize/find-apps-and-objects.md). The data drift view shows you the differences in each field for two different time ranges in a given {{data-source}}. The view helps you to visualize the changes in your data over time and enables you to understand its behavior better. :::{image} ../../images/kibana-ml-data-drift.png -:alt: Data drift view in {kib} +:alt: Data drift view in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-dfanalytics.md b/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-dfanalytics.md index 04f7688358..61c7e2c942 100644 --- a/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-dfanalytics.md +++ b/explore-analyze/machine-learning/machine-learning-in-kibana/xpack-ml-dfanalytics.md @@ -13,7 +13,7 @@ The Elastic {{ml}} {{dfanalytics}} feature enables you to analyze your data usin If you have a license that includes the {{ml-features}}, you can create {{dfanalytics}} jobs and view their results on the **Data Frame Analytics** page in {{kib}}. For example: :::{image} ../../../images/kibana-classification.png -:alt: {{classification-cap}} results in {kib} +:alt: {{classification-cap}} results in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/setting-up-machine-learning.md b/explore-analyze/machine-learning/setting-up-machine-learning.md index 965d8addef..e86708a745 100644 --- a/explore-analyze/machine-learning/setting-up-machine-learning.md +++ b/explore-analyze/machine-learning/setting-up-machine-learning.md @@ -69,7 +69,7 @@ Granting `All` or `Read` {{kib}} feature privilege for {{ml-app}} will also gran In {{kib}}, the {{ml-features}} must be visible in your [space](../../deploy-manage/manage-spaces.md#spaces-control-feature-visibility). 
To manage which features are visible in your space, go to **{{stack-manage-app}}** > **{{kib}}** > **Spaces** or use the [global search field](../find-and-organize/find-apps-and-objects.md) to locate **Spaces** directly. :::{image} ../../images/machine-learning-spaces.jpg -:alt: Manage spaces in {kib} +:alt: Manage spaces in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/transforms/ecommerce-transforms.md b/explore-analyze/transforms/ecommerce-transforms.md index 0a47bf94cb..6d65038eff 100644 --- a/explore-analyze/transforms/ecommerce-transforms.md +++ b/explore-analyze/transforms/ecommerce-transforms.md @@ -23,14 +23,14 @@ mapped_pages: Go to **Management** > **Stack Management** > **Data** > **Transforms** in {{kib}} and use the wizard to create a {{transform}}: :::{image} ../../images/elasticsearch-reference-ecommerce-pivot1.png - :alt: Creating a simple {{transform}} in {kib} + :alt: Creating a simple {{transform}} in {{kib}} :class: screenshot ::: Group the data by customer ID and add one or more aggregations to learn more about each customer’s orders. For example, let’s calculate the sum of products they purchased, the total price of their purchases, the maximum number of products that they purchased in a single order, and their total number of orders. We’ll accomplish this by using the [`sum` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html) on the `total_quantity` and `taxless_total_price` fields, the [`max` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html) on the `total_quantity` field, and the [`cardinality` aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html) on the `order_id` field: :::{image} ../../images/elasticsearch-reference-ecommerce-pivot2.png - :alt: Adding multiple aggregations to a {{transform}} in {kib} + :alt: Adding multiple aggregations to a {{transform}} in {{kib}} :class: screenshot ::: @@ -95,7 +95,7 @@ mapped_pages: 2. Decide whether you want the {{transform}} to run once or continuously. Since this sample data index is unchanging, let’s use the default behavior and just run the {{transform}} once. If you want to try it out, however, go ahead and click on **Continuous mode**. You must choose a field that the {{transform}} can use to check which entities have changed. In general, it’s a good idea to use the ingest timestamp field. In this example, however, you can use the `order_date` field. 3. Optionally, you can configure a retention policy that applies to your {{transform}}. Select a date field that is used to identify old documents in the destination index and provide a maximum age. Documents that are older than the configured value are removed from the destination index. :::{image} ../../images/elasticsearch-reference-ecommerce-pivot3.png - :alt: Adding transfrom ID and retention policy to a {{transform}} in {kib} + :alt: Adding transform ID and retention policy to a {{transform}} in {{kib}} :class: screenshot ::: @@ -295,7 +295,7 @@ mapped_pages: You can start, stop, reset, and manage {{transforms}} in {{kib}}: :::{image} ../../images/elasticsearch-reference-manage-transforms.png - :alt: Managing {{transforms}} in {kib} + :alt: Managing {{transforms}} in {{kib}} :class: screenshot ::: @@ -317,14 +317,14 @@ mapped_pages: 7. Explore the data in your new index. 
For example, use the **Discover** application in {{kib}}: :::{image} ../../images/elasticsearch-reference-ecommerce-results.png - :alt: Exploring the new index in {kib} + :alt: Exploring the new index in {{kib}} :class: screenshot ::: 8. Optional: Create another {{transform}}, this time using the `latest` method. This method populates the destination index with the latest documents for each unique key value. For example, you might want to find the latest orders (sorted by the `order_date` field) for each customer or for each country and region. :::{image} ../../images/elasticsearch-reference-ecommerce-latest1.png - :alt: Creating a latest {{transform}} in {kib} + :alt: Creating a latest {{transform}} in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/transforms/transform-examples.md b/explore-analyze/transforms/transform-examples.md index 143e18155a..45e9b67d26 100644 --- a/explore-analyze/transforms/transform-examples.md +++ b/explore-analyze/transforms/transform-examples.md @@ -23,7 +23,7 @@ These examples demonstrate how to use {{transforms}} to derive useful insights f This example uses the eCommerce orders sample data set to find the customers who spent the most in a hypothetical webshop. Let’s use the `pivot` type of {{transform}} such that the destination index contains the number of orders, the total price of the orders, the amount of unique products and the average price per order, and the total amount of ordered products for each customer. :::{image} ../../images/elasticsearch-reference-transform-ex1-1.jpg -:alt: Finding your best customers with {{transforms}} in {kib} +:alt: Finding your best customers with {{transforms}} in {{kib}} :class: screenshot ::: @@ -291,14 +291,14 @@ This example uses the web log sample data set to find the last log from an IP ad Pick the `clientip` field as the unique key; the data is grouped by this field. Select `timestamp` as the date field that sorts the data chronologically. For continuous mode, specify a date field that is used to identify new documents, and an interval between checks for changes in the source index. :::{image} ../../images/elasticsearch-reference-transform-ex4-1.jpg -:alt: Finding the last log event for each IP address with {{transforms}} in {kib} +:alt: Finding the last log event for each IP address with {{transforms}} in {{kib}} :class: screenshot ::: Let’s assume that we’re interested in retaining documents only for IP addresses that appeared recently in the log. You can define a retention policy and specify a date field that is used to calculate the age of a document. This example uses the same date field that is used to sort the data. Then set the maximum age of a document; documents that are older than the value you set will be removed from the destination index. :::{image} ../../images/elasticsearch-reference-transform-ex4-2.jpg -:alt: Defining retention policy for {{transforms}} in {kib} +:alt: Defining retention policy for {{transforms}} in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/transforms/transform-overview.md b/explore-analyze/transforms/transform-overview.md index 2094e1e04d..f29f08b1dd 100644 --- a/explore-analyze/transforms/transform-overview.md +++ b/explore-analyze/transforms/transform-overview.md @@ -41,7 +41,7 @@ Imagine that you run a webshop that sells clothes. 
Every order creates a documen If you want to check the sales in the different categories in your last fiscal year, define a {{transform}} that groups the data by the product categories (women’s shoes, men’s clothing, etc.) and the order date. Use the last year as the interval for the order date. Then add a sum aggregation on the ordered quantity. The result is an entity-centric index that shows the number of sold items in every product category in the last year. :::{image} ../../images/elasticsearch-reference-pivot-preview.png -:alt: Example of a pivot {{transform}} preview in {kib} +:alt: Example of a pivot {{transform}} preview in {{kib}} :class: screenshot ::: @@ -50,7 +50,7 @@ If you want to check the sales in the different categories in your last fiscal y You can use the `latest` type of {{transform}} to copy the most recent documents into a new index. You must identify one or more fields as the unique key for grouping your data, as well as a date field that sorts the data chronologically. For example, you can use this type of {{transform}} to keep track of the latest purchase for each customer or the latest event for each host. :::{image} ../../images/elasticsearch-reference-latest-preview.png -:alt: Example of a latest {{transform}} preview in {kib} +:alt: Example of a latest {{transform}} preview in {{kib}} :class: screenshot ::: diff --git a/explore-analyze/visualize/custom-visualizations-with-vega.md b/explore-analyze/visualize/custom-visualizations-with-vega.md index 0cf6b0d15d..f2ba8a8f9c 100644 --- a/explore-analyze/visualize/custom-visualizations-with-vega.md +++ b/explore-analyze/visualize/custom-visualizations-with-vega.md @@ -1119,7 +1119,7 @@ Learn more about {{kib}} extension, additional **Vega** resources, and examples. {{kib}} has extended Vega and Vega-Lite with extensions that support: * Automatic sizing -* Default theme to match {kib} +* Default theme to match {{kib}} * Writing {{es}} queries using the time range and filters from dashboards * [preview] Using the Elastic Map Service in Vega maps * Additional tooltip styling diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md index 81bce5493f..45054367cb 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/saml-guide-stack.md @@ -287,7 +287,7 @@ The possible solutions to this problem are: * Ask your IdP administrator or vendor to provide a Single Logout service * If your Idp does provide a Single Logout Service, make sure it is included in the IdP metadata file, and do *not* set `idp.use_single_logout` to `false`. -* Advise your users to close their browser after logging out of {kib} +* Advise your users to close their browser after logging out of {{kib}} * Enable the `force_authn` setting on your SAML realm. This setting causes the Elastic Stack to request fresh authentication from the IdP every time a user attempts to log in to {{kib}}. This setting defaults to `false` because it can be a more cumbersome user experience, but it can also be an effective protection to stop users piggy-backing on existing IdP sessions. 
diff --git a/raw-migrated-files/kibana/kibana/connect-to-elasticsearch.md b/raw-migrated-files/kibana/kibana/connect-to-elasticsearch.md index 4ca00fe905..af1634f2a1 100644 --- a/raw-migrated-files/kibana/kibana/connect-to-elasticsearch.md +++ b/raw-migrated-files/kibana/kibana/connect-to-elasticsearch.md @@ -72,7 +72,7 @@ The upload feature is not intended for use as part of a repeated production proc :::{image} ../../../images/kibana-add-data-fv.png -:alt: Uploading a file in {kib} +:alt: Uploading a file in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/apm.md b/raw-migrated-files/observability-docs/observability/apm.md index 37ea2ef71e..6e32b1a2f3 100644 --- a/raw-migrated-files/observability-docs/observability/apm.md +++ b/raw-migrated-files/observability-docs/observability/apm.md @@ -3,7 +3,7 @@ Elastic APM is an application performance monitoring system built on the {{stack}}. It allows you to monitor software services and applications in real time, by collecting detailed performance information on response time for incoming requests, database queries, calls to caches, external HTTP requests, and more. This makes it easy to pinpoint and fix performance problems quickly. :::{image} ../../../images/observability-apm-app-landing.png -:alt: Applications UI in {kib} +:alt: Applications UI in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/logs-metrics-get-started.md b/raw-migrated-files/observability-docs/observability/logs-metrics-get-started.md index 84870737b1..043f2eba05 100644 --- a/raw-migrated-files/observability-docs/observability/logs-metrics-get-started.md +++ b/raw-migrated-files/observability-docs/observability/logs-metrics-get-started.md @@ -60,7 +60,7 @@ The **Add agent** flyout has two options: **Enroll in {{fleet}}** and **Run stan 2. Download, install, and enroll the {{agent}} on your host by selecting your host operating system and following the **Install {{agent}} on your host** step. :::{image} ../../../images/observability-kibana-agent-flyout.png - :alt: Add agent flyout in {kib} + :alt: Add agent flyout in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/monitor-uptime-synthetics.md b/raw-migrated-files/observability-docs/observability/monitor-uptime-synthetics.md index 0653c46ea9..80bcfa8aeb 100644 --- a/raw-migrated-files/observability-docs/observability/monitor-uptime-synthetics.md +++ b/raw-migrated-files/observability-docs/observability/monitor-uptime-synthetics.md @@ -14,7 +14,7 @@ Synthetics periodically checks the status of your services and applications. 
Mon * [Browser monitors](../../../solutions/observability/apps/synthetic-monitoring.md#monitoring-synthetics) :::{image} ../../../images/observability-synthetics-monitor-page.png -:alt: {{synthetics-app}} in {kib} +:alt: {{synthetics-app}} in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/synthetics-analyze.md b/raw-migrated-files/observability-docs/observability/synthetics-analyze.md index d62d8dc848..7e4ee29b04 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-analyze.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-analyze.md @@ -23,7 +23,7 @@ When you use a single monitor configuration to create monitors in multiple locat :::{image} ../../../images/observability-synthetics-monitor-page.png -:alt: {{synthetics-app}} in {kib} +:alt: {{synthetics-app}} in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/synthetics-params-secrets.md b/raw-migrated-files/observability-docs/observability/synthetics-params-secrets.md index d7b0f6e5ab..cc73e6ac01 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-params-secrets.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-params-secrets.md @@ -34,7 +34,7 @@ In the {{synthetics-app}}: 3. Define parameters. :::{image} ../../../images/observability-synthetics-params-secrets-kibana-define.png -:alt: Global parameters tab on the Synthetics Settings page in {kib} +:alt: Global parameters tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/synthetics-settings.md b/raw-migrated-files/observability-docs/observability/synthetics-settings.md index 5fec18101a..337d4d3f4a 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-settings.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-settings.md @@ -43,7 +43,7 @@ You can enable and disable default alerts for individual monitors in a few ways: In the **Alerting** tab on the Synthetics Settings page, you can add and configure connectors. If you are running in Elastic Cloud, then an SMTP connector will automatically be configured, allowing you to easily set up email alerts. Read more about all available connectors in [Action types](../../../solutions/observability/incident-management/create-an-uptime-duration-anomaly-rule.md#action-types-duration). :::{image} ../../../images/observability-synthetics-settings-alerting.png -:alt: Alerting tab on the Synthetics Settings page in {kib} +:alt: Alerting tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: @@ -55,7 +55,7 @@ In the **Alerting** tab on the Synthetics Settings page, you can add and configu In the **{{private-location}}s** tab, you can add and manage {{private-location}}s. After you [Set up {{fleet-server}} and {{agent}}](../../../solutions/observability/apps/monitor-resources-on-private-networks.md#synthetics-private-location-fleet-agent) and [Connect to the {{stack}}](../../../solutions/observability/apps/monitor-resources-on-private-networks.md#synthetics-private-location-connect), this is where you will add the {{private-location}} so you can specify it as the location for a monitor created using the {{synthetics-app}} or projects. 
:::{image} ../../../images/observability-synthetics-settings-private-locations.png -:alt: {{private-location}}s tab on the Synthetics Settings page in {kib} +:alt: {{private-location}}s tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: @@ -67,7 +67,7 @@ Global parameters can be defined once and used across the configuration of light In the **Global parameters** tab, you can define variables and parameters. This is one of several methods you can use to define variables and parameters. To learn more about the other methods and which methods take precedence over others, see [Work with params and secrets](../../../solutions/observability/apps/work-with-params-secrets.md). :::{image} ../../../images/observability-synthetics-settings-global-parameters.png -:alt: Global parameters tab on the Synthetics Settings page in {kib} +:alt: Global parameters tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: @@ -79,7 +79,7 @@ When you set up a synthetic monitor, data from the monitor is saved in [Elastics In the **Data retention** tab, use the links to jump to the relevant policy for each data stream. Learn more about the data included in each data stream in [Manage data retention](../../../solutions/observability/apps/manage-data-retention.md). :::{image} ../../../images/observability-synthetics-settings-data-retention.png -:alt: Data retention tab on the Synthetics Settings page in {kib} +:alt: Data retention tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: @@ -97,7 +97,7 @@ To create a Project API key, you must be logged into {{kib}} as a user with the :::{image} ../../../images/observability-synthetics-settings-api-keys.png -:alt: Project API keys tab on the Synthetics Settings page in {kib} +:alt: Project API keys tab on the Synthetics Settings page in {{kib}} :class: screenshot ::: diff --git a/raw-migrated-files/security-docs/security/rules-ui-create.md b/raw-migrated-files/security-docs/security/rules-ui-create.md index 4d9a83190d..1f3076634b 100644 --- a/raw-migrated-files/security-docs/security/rules-ui-create.md +++ b/raw-migrated-files/security-docs/security/rules-ui-create.md @@ -695,7 +695,7 @@ Refer to [Action frequency: Summary of alerts](../../../explore-analyze/alerts-c * `{{context.alerts}}`: Array of detected alerts -* `{{{context.results_link}}}`: URL to the alerts in {kib} +* `{{{context.results_link}}}`: URL to the alerts in {{kib}} * `{{context.rule.anomaly_threshold}}`: Anomaly threshold score above which alerts are generated ({{ml}} rules only) * `{{context.rule.description}}`: Rule description * `{{context.rule.false_positives}}`: Rule false positives diff --git a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md index fe74740dd3..b9dd343658 100644 --- a/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md +++ b/raw-migrated-files/stack-docs/elastic-stack/installing-stack-demo-self.md @@ -463,7 +463,7 @@ As with {{es}}, you can use RPM to install {{kib}} on another host. You can find 17. 
In the `status` command output, a URL is shown with: - * A host address to access {kib} + * A host address to access {{kib}} * A six digit verification code For example: diff --git a/solutions/observability/apps/uptime-monitoring-deprecated.md b/solutions/observability/apps/uptime-monitoring-deprecated.md index 00d73bcda5..07b9f76e55 100644 --- a/solutions/observability/apps/uptime-monitoring-deprecated.md +++ b/solutions/observability/apps/uptime-monitoring-deprecated.md @@ -36,7 +36,7 @@ In the {{uptime-app}}, you can monitor the status of network endpoints using the | **TCP monitor** | Monitor the services running on your hosts. The TCP monitor checks individual portsto make sure the service is accessible and running. | :::{image} ../../../images/observability-uptime-app.png -:alt: {{uptime-app}} in {kib} +:alt: {{uptime-app}} in {{kib}} :class: screenshot ::: diff --git a/solutions/observability/cicd.md b/solutions/observability/cicd.md index 1635710fc8..6eda11b779 100644 --- a/solutions/observability/cicd.md +++ b/solutions/observability/cicd.md @@ -291,21 +291,21 @@ For instance, you can follow the below steps: :::{image} ../../images/observability-jenkins-dashboard-import.png :alt: Import {{kib}} dashboard -:title: Import dashboard in {kib} +:title: Import dashboard in {{kib}} :class: screenshot ::: * The new dashboard is now ready to be used: :::{image} ../../images/observability-jenkins-dashboard-ready.png -:alt: Jenkins dashboard in {kib} +:alt: Jenkins dashboard in {{kib}} :title: Jenkins dashboard in {{kib}} is ready :class: screenshot ::: :::{image} ../../images/observability-jenkins-dashboard.png :alt: Jenkins dashboard -:title: Jenkins dashboard in {kib} +:title: Jenkins dashboard in {{kib}} :class: screenshot ::: diff --git a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-amazon-data-firehose.md b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-amazon-data-firehose.md index 9a647d7362..b5412fe155 100644 --- a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-amazon-data-firehose.md +++ b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-amazon-data-firehose.md @@ -16,7 +16,7 @@ Amazon Data Firehose is a popular service that allows you to send your service l In this tutorial, you’ll learn how to: -* Install AWS integration in {kib} +* Install AWS integration in {{kib}} * Create a delivery stream in Amazon Data Firehose * Specify the destination settings for your Firehose stream * Send data to the Firehose delivery stream diff --git a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-elastic-serverless-forwarder.md b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-elastic-serverless-forwarder.md index 68f8bbda54..bad74e0daa 100644 --- a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-elastic-serverless-forwarder.md +++ b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-elastic-serverless-forwarder.md @@ -18,7 +18,7 @@ In this tutorial, you’ll learn how to: * Enable AWS VPC flow logs to be sent to your S3 bucket * Create an SQS queue and notifications for VPC flow logs -* Install and configure the Elastic AWS integration from {kib} +* Install and configure the Elastic AWS integration from {{kib}} * Visualize and analyze AWS logs in the Elastic Stack diff --git a/solutions/observability/cloud/monitor-aws-network-firewall-logs.md b/solutions/observability/cloud/monitor-aws-network-firewall-logs.md index 43fe64a8e5..604de92131 100644 
--- a/solutions/observability/cloud/monitor-aws-network-firewall-logs.md +++ b/solutions/observability/cloud/monitor-aws-network-firewall-logs.md @@ -16,7 +16,7 @@ You will go through the following steps: * Select a AWS Network Firewall-compatible resource * Create a delivery stream in Amazon Data Firehose * Set up logging to forward the logs to the Elastic stack using a Firehose stream -* Visualize your logs in {kib} +* Visualize your logs in {{kib}} ## Before you begin [firehose-firewall-prerequisites] diff --git a/solutions/observability/cloud/monitor-cloudtrail-logs.md b/solutions/observability/cloud/monitor-cloudtrail-logs.md index 0ff3100886..2b248b6648 100644 --- a/solutions/observability/cloud/monitor-cloudtrail-logs.md +++ b/solutions/observability/cloud/monitor-cloudtrail-logs.md @@ -11,11 +11,11 @@ mapped_pages: In this section, you’ll learn how to monitor and analyze the CloudTrail logs you send to Elastic with Amazon Data Firehose. You will go through the following steps: -* Install AWS integration in {kib} +* Install AWS integration in {{kib}} * Export Cloudtrail events to CloudWatch * Set up a Firehose delivery stream * Set up a subscription filter to route Cloudtrail events to a delivery stream -* Visualize your CloudTrail logs in {kib} +* Visualize your CloudTrail logs in {{kib}} ## Before you begin [firehose-cloudtrail-prerequisites] diff --git a/solutions/observability/cloud/monitor-cloudwatch-logs.md b/solutions/observability/cloud/monitor-cloudwatch-logs.md index a21da3d63d..18dc4adc8d 100644 --- a/solutions/observability/cloud/monitor-cloudwatch-logs.md +++ b/solutions/observability/cloud/monitor-cloudwatch-logs.md @@ -13,11 +13,11 @@ In this section, you’ll learn how to export log events from CloudWatch logs to You’ll go through the following steps: -* Install AWS integration in {kib} +* Install AWS integration in {{kib}} * Select a CloudWatch log group to monitor * Create a delivery stream in Amazon Data Firehose * Set up a subscription filter to forward the logs using the Firehose stream -* Visualize your logs in {kib} +* Visualize your logs in {{kib}} ## Before you begin [firehose-cloudwatch-prerequisites] diff --git a/solutions/observability/cloud/monitor-web-application-firewall-waf-logs.md b/solutions/observability/cloud/monitor-web-application-firewall-waf-logs.md index f7f34d2140..b4ae8abb23 100644 --- a/solutions/observability/cloud/monitor-web-application-firewall-waf-logs.md +++ b/solutions/observability/cloud/monitor-web-application-firewall-waf-logs.md @@ -17,7 +17,7 @@ You will go through the following steps: * Create a delivery stream in Amazon Data Firehose * Create a web Access Control List (ACL) to generate WAF logs * Set up logging to forward the logs to the {{stack}} using a Firehose stream -* Visualize your WAF logs in {kib} +* Visualize your WAF logs in {{kib}} ## Before you begin [firehose-waf-prerequisites] diff --git a/solutions/observability/unknown-bucket/monitor-aws-with-amazon-data-firehose.md b/solutions/observability/unknown-bucket/monitor-aws-with-amazon-data-firehose.md index daa5dc578b..d6069d56d8 100644 --- a/solutions/observability/unknown-bucket/monitor-aws-with-amazon-data-firehose.md +++ b/solutions/observability/unknown-bucket/monitor-aws-with-amazon-data-firehose.md @@ -16,7 +16,7 @@ Amazon Data Firehose is a popular service that allows you to send your service l In this tutorial, you’ll learn how to: -* Install AWS integration in {kib} +* Install AWS integration in {{kib}} * Create a delivery stream in Amazon Data Firehose * 
Specify the destination settings for your Firehose stream * Send data to the Firehose delivery stream From 4fe529c527596c24b00885f074c1ee6a2eca6b6c Mon Sep 17 00:00:00 2001 From: shainaraskas Date: Thu, 13 Feb 2025 14:56:00 -0500 Subject: [PATCH 6/6] fix instances with other patterns --- .../elastic-cloud/azure-native-isv-service.md | 2 +- .../self-managed/configure-elasticsearch.md | 2 +- .../monitor/monitoring-data/kibana-alerts.md | 2 +- .../remote-clusters/ece-enable-ccs.md | 2 +- .../set-up-basic-security-plus-https.md | 2 +- .../searchable-snapshots.md | 2 +- .../elasticsearch-privileges.md | 2 +- .../alerts/alerting-getting-started.md | 2 +- .../alerts-cases/cases/manage-cases.md | 2 +- explore-analyze/find-and-organize/reports.md | 2 +- .../anomaly-detection/geographic-anomalies.md | 2 +- .../anomaly-detection/mapping-anomalies.md | 2 +- .../anomaly-detection/ml-api-quickref.md | 4 +-- .../languages/esql-getting-started.md | 2 +- .../custom-visualizations-with-vega.md | 2 +- .../maps/indexing-geojson-data-tutorial.md | 2 +- .../agent-apis.md | 4 +-- .../agent-es-airgapped.md | 2 +- .../agent-installed.md | 4 +-- .../agent-kafka-es.md | 4 +-- .../agent-kafka-essink.md | 4 +-- .../agent-kafka-ls.md | 2 +- .../agent-ls-airgapped.md | 4 +-- .../agent-ls.md | 2 +- .../agent-proxy.md | 8 ++--- .../agent-to-es.md | 2 +- .../ls-enrich.md | 2 +- .../ls-for-input.md | 2 +- .../ls-networkbridge.md | 4 +-- .../ingest-reference-architectures/lspq.md | 2 +- .../use-case-arch.md | 2 +- .../ingest-pipelines-serverless.md | 4 +-- ...ed-data-stream-to-data-stream-lifecycle.md | 12 ++++---- .../ech-cpu-usage-exceed-allowed-threshold.md | 2 +- ...jvm-heap-usage-exceed-allowed-threshold.md | 2 +- .../ec-scenario_why_is_my_node_unavailable.md | 4 +-- .../elasticsearch-reference/kerberos-realm.md | 2 +- .../security-basic-setup-https.md | 2 +- .../apm-getting-started-apm-server.md | 10 +++---- .../observability/manage-cases-settings.md | 10 +++---- .../observability/synthetics-analyze.md | 24 +++++++-------- .../synthetics-get-started-ui.md | 4 +-- .../observability/synthetics-get-started.md | 4 +-- .../security/advanced-settings.md | 2 +- .../security/cases-manage-settings.md | 12 ++++---- .../security-docs/security/cases-overview.md | 10 +++---- .../security/connect-to-byo-llm.md | 2 +- .../security/detection-entity-dashboard.md | 2 +- .../apps/fleet-managed-apm-server.md | 2 +- .../apps/monitor-fleet-managed-apm-server.md | 2 +- ...lastic-cloud-cluster-to-apm-integration.md | 2 +- .../apps/synthetics-support-matrix.md | 4 +-- .../apps/tutorial-monitor-java-application.md | 2 +- .../apps/uptime-monitoring-deprecated.md | 2 +- .../use-synthetics-with-traffic-filters.md | 2 +- solutions/observability/cicd.md | 30 +++++++++---------- .../cloud/gcp-dataflow-templates.md | 2 +- ...tutorial-observe-kubernetes-deployments.md | 2 +- .../upgrade-universal-profiling.md | 4 +-- .../learning-to-rank-model-training.md | 2 +- .../search/ranking/semantic-reranking.md | 2 +- .../search-application-security.md | 4 +-- .../troubleshoot-migrate-to-tiers.md | 8 ++--- troubleshoot/ingest/fleet/common-problems.md | 2 +- troubleshoot/kibana/alerts.md | 2 +- troubleshoot/kibana/task-manager.md | 2 +- troubleshoot/observability/inspect.md | 2 +- 67 files changed, 134 insertions(+), 134 deletions(-) diff --git a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md index a1b18bcafe..887200e32d 100644 --- 
a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md +++ b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md @@ -298,7 +298,7 @@ $$$azure-integration-whats-included$$$What is included in my {{ecloud}} deployme : Each {{ecloud}} deployment includes: * An {{es}} cluster - * A {{kib}} instance which provides data visualization and a front-end for the {stack} + * A {{kib}} instance which provides data visualization and a front-end for the {{stack}} * An APM server that allows you to easily collect application traces * An {{ents}} instance that allows you to easily build a search experience with an intuitive interface diff --git a/deploy-manage/deploy/self-managed/configure-elasticsearch.md b/deploy-manage/deploy/self-managed/configure-elasticsearch.md index 4a6743aacc..b3b9c3a6a1 100644 --- a/deploy-manage/deploy/self-managed/configure-elasticsearch.md +++ b/deploy-manage/deploy/self-managed/configure-elasticsearch.md @@ -14,7 +14,7 @@ The configuration files should contain settings which are node-specific (such as {{es}} has three configuration files: -* `elasticsearch.yml` for configuring {es} +* `elasticsearch.yml` for configuring {{es}} * `jvm.options` for configuring {{es}} JVM settings * `log4j2.properties` for configuring {{es}} logging diff --git a/deploy-manage/monitor/monitoring-data/kibana-alerts.md b/deploy-manage/monitor/monitoring-data/kibana-alerts.md index 3c2ebf373c..c40a090d23 100644 --- a/deploy-manage/monitor/monitoring-data/kibana-alerts.md +++ b/deploy-manage/monitor/monitoring-data/kibana-alerts.md @@ -15,7 +15,7 @@ applies: The {{stack}} {{monitor-features}} provide [Alerting rules](../../../explore-analyze/alerts-cases/alerts.md) out-of-the box to notify you of potential issues in the {{stack}}. These rules are preconfigured based on the best practices recommended by Elastic. However, you can tailor them to meet your specific needs. 
:::{image} ../../../images/kibana-monitoring-kibana-alerting-notification.png -:alt: {{kib}} alerting notifications in {stack-monitor-app} +:alt: {{kib}} alerting notifications in {{stack-monitor-app}} :class: screenshot ::: diff --git a/deploy-manage/remote-clusters/ece-enable-ccs.md b/deploy-manage/remote-clusters/ece-enable-ccs.md index 05c64717ad..8c64f523ae 100644 --- a/deploy-manage/remote-clusters/ece-enable-ccs.md +++ b/deploy-manage/remote-clusters/ece-enable-ccs.md @@ -13,7 +13,7 @@ These remote clusters could be: * Another {{es}} cluster of your ECE installation * An {{es}} cluster in a remote ECE installation -* An {{es}} cluster hosted on {ecloud} +* An {{es}} cluster hosted on {{ecloud}} * Any other self-managed {{es}} cluster diff --git a/deploy-manage/security/set-up-basic-security-plus-https.md b/deploy-manage/security/set-up-basic-security-plus-https.md index c6f7ec0e03..cf376793da 100644 --- a/deploy-manage/security/set-up-basic-security-plus-https.md +++ b/deploy-manage/security/set-up-basic-security-plus-https.md @@ -219,7 +219,7 @@ Typically, you need to create the following separate roles: * **setup** role for setting up index templates and other dependencies * **monitoring** role for sending monitoring information -* **writer** role for publishing events collected by {metricbeat} +* **writer** role for publishing events collected by {{metricbeat}} * **reader** role for Kibana users who need to view and create visualizations that access {{metricbeat}} data ::::{note} diff --git a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md index fd7a4898b0..b3fc380e21 100644 --- a/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md +++ b/deploy-manage/tools/snapshot-and-restore/searchable-snapshots.md @@ -173,7 +173,7 @@ A snapshot of a {{search-snap}} index contains only a small amount of metadata w Because {{search-snap}} indices are not regular indices, it is not possible to use a [source-only repository](source-only-repository.md) to take snapshots of {{search-snap}} indices. -::::{admonition} Reliability of {search-snaps} +::::{admonition} Reliability of {{search-snaps}} :class: warning :name: searchable-snapshots-reliability diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md b/deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md index 5039fdeca9..062cc7fba0 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/elasticsearch-privileges.md @@ -234,7 +234,7 @@ This section lists the privileges that you can assign to a role. `read_ilm` -: All read-only {{Ilm}} operations, such as getting policies and checking the status of {Ilm} +: All read-only {{Ilm}} operations, such as getting policies and checking the status of {{Ilm}} This privilege is not available in {{serverless-full}}. 
diff --git a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md index 9a141bca26..b097e508d1 100644 --- a/explore-analyze/alerts-cases/alerts/alerting-getting-started.md +++ b/explore-analyze/alerts-cases/alerts/alerting-getting-started.md @@ -114,7 +114,7 @@ This section will clarify some of the important differences in the function and Functionally, the {{alert-features}} differ in that: -* Scheduled checks are run on {{kib}} instead of {es} +* Scheduled checks are run on {{kib}} instead of {{es}} * {{kib}} [rules hide the details of detecting conditions](../../../explore-analyze/alerts-cases/alerts/alerting-getting-started.md#alerting-concepts-conditions) through rule types, whereas watches provide low-level control over inputs, conditions, and transformations. * {{kib}} rules track and persist the state of each detected condition through alerts. This makes it possible to mute and throttle individual alerts, and detect changes in state such as resolution. * Actions are linked to alerts. Actions are fired for each occurrence of a detected condition, rather than for the entire rule. diff --git a/explore-analyze/alerts-cases/cases/manage-cases.md b/explore-analyze/alerts-cases/cases/manage-cases.md index 0ec50785bd..92769b2c6f 100644 --- a/explore-analyze/alerts-cases/cases/manage-cases.md +++ b/explore-analyze/alerts-cases/cases/manage-cases.md @@ -16,7 +16,7 @@ Open a new case to keep track of issues and share their details with colleagues. 1. Go to **Management > {{stack-manage-app}} > Cases**, then click **Create case**. :::{image} ../../../images/kibana-cases-create.png - :alt: Create a case in {stack-manage-app} + :alt: Create a case in {{stack-manage-app}} :class: screenshot ::: diff --git a/explore-analyze/find-and-organize/reports.md b/explore-analyze/find-and-organize/reports.md index 6afb95bfcc..799f5764f2 100644 --- a/explore-analyze/find-and-organize/reports.md +++ b/explore-analyze/find-and-organize/reports.md @@ -15,7 +15,7 @@ For example, in **Discover**, you can create and download comma-separated values To view and manage reports, go to **Management** > **Reporting**. :::{image} ../../images/serverless-reports-management.png -:alt: {reports-app} +:alt: {{reports-app}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md index 32dd390ec8..68ca2bd8df 100644 --- a/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md +++ b/explore-analyze/machine-learning/anomaly-detection/geographic-anomalies.md @@ -28,7 +28,7 @@ This example uses the sample eCommerce orders and sample web logs data sets. For To get the best results from {{ml}} analytics, you must understand your data. You can use the **{{data-viz}}** in the **{{ml-app}}** app for this purpose. Search for specific fields or field types, such as geo-point fields in the sample data sets. You can see how many documents contain those fields within a specific time period and sample size. You can also see the number of distinct values, a list of example values, and preview them on a map. 
For example: :::{image} ../../../images/machine-learning-weblogs-data-visualizer-geopoint.jpg -:alt: A screenshot of a geo_point field in {data-viz} +:alt: A screenshot of a geo_point field in {{data-viz}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md b/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md index 841a3b0746..11d8cad6b1 100644 --- a/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md +++ b/explore-analyze/machine-learning/anomaly-detection/mapping-anomalies.md @@ -21,7 +21,7 @@ This example uses the sample web logs data set. For more information, see [Add t If you have fields that contain valid vector layers, you can use the **{{data-viz}}** in the **{{ml-app}}** app to see a choropleth map, in which each area is colored based on its document count. For example: :::{image} ../../../images/machine-learning-weblogs-data-visualizer-choropleth.png -:alt: A screenshot of a field that contains vector layer values in {data-viz} +:alt: A screenshot of a field that contains vector layer values in {{data-viz}} :class: screenshot ::: diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md b/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md index 1c0ce58e5e..dec602671b 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-api-quickref.md @@ -16,11 +16,11 @@ All {{ml}} {{anomaly-detect}} endpoints have the following base: The main resources can be accessed with a variety of endpoints: -* [`/anomaly_detectors/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Create and manage {anomaly-jobs} +* [`/anomaly_detectors/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Create and manage {{anomaly-jobs}} * [`/calendars/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Create and manage calendars and scheduled events * [`/datafeeds/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Select data from {{es}} to be analyzed * [`/filters/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Create and manage filters for custom rules -* [`/results/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Access the results of an {anomaly-job} +* [`/results/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Access the results of an {{anomaly-job}} * [`/model_snapshots/`](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly): Manage model snapshots For a full list, see [{{ml-cap}} {{anomaly-detect}} APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml-anomaly). diff --git a/explore-analyze/query-filter/languages/esql-getting-started.md b/explore-analyze/query-filter/languages/esql-getting-started.md index 7e48623704..3e9a97fd72 100644 --- a/explore-analyze/query-filter/languages/esql-getting-started.md +++ b/explore-analyze/query-filter/languages/esql-getting-started.md @@ -120,7 +120,7 @@ You can adjust the editor’s height by dragging its bottom border to your likin Each {{esql}} query starts with a [source command](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-source-commands). A source command produces a table, typically with data from {{es}}. 
:::{image} ../../../images/elasticsearch-reference-source-command.svg -:alt: A source command producing a table from {es} +:alt: A source command producing a table from {{es}} ::: The [`FROM`](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-commands.html#esql-from) source command returns a table with documents from a data stream, index, or alias. Each row in the resulting table represents a document. This query returns up to 1000 documents from the `sample_data` index: diff --git a/explore-analyze/visualize/custom-visualizations-with-vega.md b/explore-analyze/visualize/custom-visualizations-with-vega.md index f2ba8a8f9c..d7ddf7f674 100644 --- a/explore-analyze/visualize/custom-visualizations-with-vega.md +++ b/explore-analyze/visualize/custom-visualizations-with-vega.md @@ -15,7 +15,7 @@ mapped_pages: Use **Vega** or **Vega-Lite** when you want to create visualizations with: * Aggregations that use `nested` or `parent/child` mapping -* Aggregations without a {data-source} +* Aggregations without a {{data-source}} * Queries that use custom time filters * Complex calculations * Extracted data from _source instead of aggregations diff --git a/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md b/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md index 70f1852302..14b7a3c9c2 100644 --- a/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md +++ b/explore-analyze/visualize/maps/indexing-geojson-data-tutorial.md @@ -11,7 +11,7 @@ mapped_pages: In this tutorial, you’ll build a customized map that shows the flight path between two airports, and the lightning hot spots on that route. You’ll learn to: * Import GeoJSON files into Kibana -* Index the files in {es} +* Index the files in {{es}} * Display the data in a multi-layer map diff --git a/manage-data/ingest/ingest-reference-architectures/agent-apis.md b/manage-data/ingest/ingest-reference-architectures/agent-apis.md index ded0799b85..38b57f019f 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-apis.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-apis.md @@ -6,11 +6,11 @@ mapped_pages: # Elastic Agent to Elasticsearch: APIs for collection [agent-apis] :::{image} ../../../images/ingest-ea-apis.png -:alt: Image showing {{agent}} collecting data using APIs and sending to {es} +:alt: Image showing {{agent}} collecting data using APIs and sending to {{es}} ::: Ingest model -: Control path: {{agent}} to {{fleet}} to {{es}}
Data path: {{agent}} running on a user-managed host to collect data about the external infrastructure through APIs, and then forwarding to {es} +: Control path: {{agent}} to {{fleet}} to {{es}}
Data path: {{agent}} running on a user-managed host to collect data about the external infrastructure through APIs, and then forwarding to {{es}} Use when : An {{agent}} integration exists for software components that expose APIs for data collection diff --git a/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md b/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md index 34637f5bbb..2980812973 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-es-airgapped.md @@ -13,7 +13,7 @@ Ingest model : All {{stack}} components deployed inside a DMZ: * Control path: {{agent}} to {{fleet}} to {{es}}
- * Data path: {{agent}} to {es} + * Data path: {{agent}} to {{es}} Use when diff --git a/manage-data/ingest/ingest-reference-architectures/agent-installed.md b/manage-data/ingest/ingest-reference-architectures/agent-installed.md index f6edbfd6b1..35605a4da5 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-installed.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-installed.md @@ -6,11 +6,11 @@ mapped_pages: # Elastic Agent to Elasticsearch: Agent installed [agent-installed] :::{image} ../../../images/ingest-ea-agent-installed.png -:alt: Image showing {{agent}} collecting data and sending to {es} +:alt: Image showing {{agent}} collecting data and sending to {{es}} ::: Ingest model -: Control path: {{agent}} to {{fleet}} to {{es}}
Data path: {{agent}} to {es} +: Control path: {{agent}} to {{fleet}} to {{es}}
Data path: {{agent}} to {{es}} Use when : An {{agent}} integration exists in the {{kib}} integrations UI for the software you want to monitor, observe, and protect. diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-es.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-es.md index 39b6a49be3..a7bdebb7df 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-es.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-es.md @@ -6,7 +6,7 @@ mapped_pages: # Elastic Agent to Elasticsearch with Kafka as middleware message queue [agent-kafka-es] :::{image} ../../../images/ingest-ea-kafka.png -:alt: Image showing {{agent}} collecting data and using Kafka as a message queue enroute to {es} +:alt: Image showing {{agent}} collecting data and using Kafka as a message queue enroute to {{es}} ::: Ingest models @@ -16,7 +16,7 @@ Ingest models Use when -: You are standardizing on Kafka as middleware message queue between {{agent}} and {es} +: You are standardizing on Kafka as middleware message queue between {{agent}} and {{es}} Notes : The transformation from raw data to Elastic Common Schema (ECS) and any other enrichment can be handled by {{ls}} as described in [{{agent}} to {{ls}} (for enrichment) to {{es}}](ls-enrich.md). diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md index f7dfd21a30..4e4dcbde1b 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-essink.md @@ -6,7 +6,7 @@ mapped_pages: # Elastic Agent to Logstash to Kafka to Kafka ES Sink to Elasticsearch: Kafka as middleware message queue [agent-kafka-essink] :::{image} ../../../images/ingest-ls-kafka-essink.png -:alt: Image showing {{agent}} collecting data and using Kafka as a message queue enroute to {es} +:alt: Image showing {{agent}} collecting data and using Kafka as a message queue enroute to {{es}} ::: Ingest model @@ -16,7 +16,7 @@ Ingest model Use when -: You are standardizing on Kafka as middleware message queue between {{agent}} and {es} +: You are standardizing on Kafka as middleware message queue between {{agent}} and {{es}} Notes : The transformation from raw data to Elastic Common Schema (ECS) and any other enrichment can be handled by {{ls}} as described in [{{agent}} to {{ls}} (for enrichment) to {{es}}](ls-enrich.md). diff --git a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md index c221cd2271..d7b86c6f53 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-kafka-ls.md @@ -16,7 +16,7 @@ Ingest model Use when -: You are standardizing on Kafka as middleware message queue between {{agent}} and {es} +: You are standardizing on Kafka as middleware message queue between {{agent}} and {{es}} Notes : The transformation from raw data to Elastic Common Schema (ECS) and any other enrichment can be handled by {{ls}} as described in [{{agent}} to {{ls}} (for enrichment) to {{es}}](ls-enrich.md). 
diff --git a/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md b/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md index 72dc50a45d..595035bbc2 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-ls-airgapped.md @@ -6,14 +6,14 @@ mapped_pages: # Elastic Agent to Logstash: Air-gapped environment [agent-ls-airgapped] :::{image} ../../../images/ingest-ea-ls-airgapped.png -:alt: Image showing {agent} +:alt: Image showing {{agent}} ::: Ingest model : All {{stack}} components deployed inside a DMZ: * Control path: {{agent}} to {{fleet}} to {{es}}
- * Data path: {{agent}} to {es} + * Data path: {{agent}} to {{es}} Use when diff --git a/manage-data/ingest/ingest-reference-architectures/agent-ls.md b/manage-data/ingest/ingest-reference-architectures/agent-ls.md index 05a14b1b0c..70b208e3e4 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-ls.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-ls.md @@ -6,7 +6,7 @@ mapped_pages: # Elastic Agent to Logstash to Elasticsearch [agent-ls] :::{image} ../../../images/ingest-ea-ls-es.png -:alt: Image showing {{agent}} to {{ls}} to {es} +:alt: Image showing {{agent}} to {{ls}} to {{es}} ::: Ingest models diff --git a/manage-data/ingest/ingest-reference-architectures/agent-proxy.md b/manage-data/ingest/ingest-reference-architectures/agent-proxy.md index 5b634b1974..cfad026bac 100644 --- a/manage-data/ingest/ingest-reference-architectures/agent-proxy.md +++ b/manage-data/ingest/ingest-reference-architectures/agent-proxy.md @@ -19,15 +19,15 @@ This architecture works with a variety of proxying tools to allow for more flexi Ingest model -:
Control path for {{fleet-server}} on {{ecloud}}: {{agent}} to proxy to {{fleet-server}} to {{es}}
Control path for self-managed {{fleet-server}}: {{agent}} to {{fleet-server}} to proxy to {{es}}
Data path: {{agent}} to proxy to {es} +:
Control path for {{fleet-server}} on {{ecloud}}: {{agent}} to proxy to {{fleet-server}} to {{es}}
Control path for self-managed {{fleet-server}}: {{agent}} to {{fleet-server}} to proxy to {{es}}
Data path: {{agent}} to proxy to {{es}}

Use when

-: * Network restrictions prevent connection between {{agent}} network and network where {{fleet-server}} and {{stack}} are deployed, as when {{fleet-server}} is deployed on {ecloud}
-* Network restrictions prevent connection between {{agent}} and {{fleet-server}} network and the network where {{stack}} is deployed, as when {{stack}} is deployed on {ecloud}
+: * Network restrictions prevent connection between {{agent}} network and network where {{fleet-server}} and {{stack}} are deployed, as when {{fleet-server}} is deployed on {{ecloud}}
+* Network restrictions prevent connection between {{agent}} and {{fleet-server}} network and the network where {{stack}} is deployed, as when {{stack}} is deployed on {{ecloud}}
* Using [{{ls}} as proxy](ls-networkbridge.md) is not feasible.

-Currently {{agent}} is not able to present a certificate for connectivity to {{fleet-server}}. Therefore if a proxy placed between the {{agent}} and {{fleet-server}} is configured for mutual TLS, {{agents}} won’t be able to establish connectivity to {fleet server}.
+Currently {{agent}} is not able to present a certificate for connectivity to {{fleet-server}}. Therefore if a proxy placed between the {{agent}} and {{fleet-server}} is configured for mutual TLS, {{agents}} won’t be able to establish connectivity to {{fleet-server}}.


## Resources [agent-proxy-resources]

diff --git a/manage-data/ingest/ingest-reference-architectures/agent-to-es.md b/manage-data/ingest/ingest-reference-architectures/agent-to-es.md
index 7e8f27345b..cdd2378436 100644
--- a/manage-data/ingest/ingest-reference-architectures/agent-to-es.md
+++ b/manage-data/ingest/ingest-reference-architectures/agent-to-es.md
@@ -10,7 +10,7 @@ To ingest data into {{es}}, use the *simplest option that meets your needs* and

Integrations offer advantages beyond easier data collection—​advantages such as dashboards, central agent management, and easy enablement of [Elastic solutions](https://www.elastic.co/products/), such as Security and Observability.
:::{image} ../../../images/ingest-ea-es.png -:alt: Image showing {{agent}} collecting data and sending to {es} +:alt: Image showing {{agent}} collecting data and sending to {{es}} ::: diff --git a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md index d4208cf10c..04cdf70f5f 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-enrich.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-enrich.md @@ -11,7 +11,7 @@ mapped_pages: Ingest models : * {{agent}} to {{es}} using {{ls}} to enrich the data -* {{agent}} to {{ls}} for enrichment based on fields in the {{agent}} data to {es} +* {{agent}} to {{ls}} for enrichment based on fields in the {{agent}} data to {{es}} Use when diff --git a/manage-data/ingest/ingest-reference-architectures/ls-for-input.md b/manage-data/ingest/ingest-reference-architectures/ls-for-input.md index 51aac06d6a..554e883f6a 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-for-input.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-for-input.md @@ -6,7 +6,7 @@ mapped_pages: # Logstash to Elasticsearch [ls-for-input] :::{image} ../../../images/ingest-ls-es.png -:alt: Image showing {{ls}} collecting data and sending to {es} +:alt: Image showing {{ls}} collecting data and sending to {{es}} ::: Ingest model diff --git a/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md b/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md index 244944354c..3ed9a053be 100644 --- a/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md +++ b/manage-data/ingest/ingest-reference-architectures/ls-networkbridge.md @@ -6,11 +6,11 @@ mapped_pages: # Elastic Agent to Logstash to Elasticsearch: Logstash as a proxy [ls-networkbridge] :::{image} ../../../images/ingest-ea-ls-bridge.png -:alt: Image showing {{agent}}s collecting data and sending to {{ls}} for proxying before sending on to {es} +:alt: Image showing {{agent}}s collecting data and sending to {{ls}} for proxying before sending on to {{es}} ::: Ingest model -: Data path: {{agent}} to {{ls}} as bridge to {{es}} on {{stack}} network
Control path: {{agent}} to {{fleet-server}} to {es} +: Data path: {{agent}} to {{ls}} as bridge to {{es}} on {{stack}} network
Control path: {{agent}} to {{fleet-server}} to {{es}} Use when : Agents have network restrictions for connecting to {{es}} on {{stack}} deployed outside of the agent network diff --git a/manage-data/ingest/ingest-reference-architectures/lspq.md b/manage-data/ingest/ingest-reference-architectures/lspq.md index 43ef285285..2816bc03c9 100644 --- a/manage-data/ingest/ingest-reference-architectures/lspq.md +++ b/manage-data/ingest/ingest-reference-architectures/lspq.md @@ -10,7 +10,7 @@ mapped_pages: ::: Ingest model -: {{agent}} to {{ls}} persistent queue to {es} +: {{agent}} to {{ls}} persistent queue to {{es}} Use when : Your data flow may encounter network issues, bursts of events, and/or downstream unavailability and you need the ability to buffer the data before ingestion. diff --git a/manage-data/ingest/ingest-reference-architectures/use-case-arch.md b/manage-data/ingest/ingest-reference-architectures/use-case-arch.md index 9c7fd04f24..0003026310 100644 --- a/manage-data/ingest/ingest-reference-architectures/use-case-arch.md +++ b/manage-data/ingest/ingest-reference-architectures/use-case-arch.md @@ -10,7 +10,7 @@ We offer a variety of ingest architectures to serve a wide range of use cases an To ingest data into {{es}}, use the *simplest option that meets your needs* and satisfies your use case. For many users and use cases, the simplest approach is ingesting data with {{agent}} and sending it to {{es}}. {{agent}} and [{{agent}} integrations](https://www.elastic.co/integrations/) are available for many popular platforms and services, and are a good place to start. ::::{tip} -You can host {{es}} on your own hardware or send your data to {{es}} on {{ecloud}}. For most users, {{agent}} writing directly to {{es}} on {{ecloud}} provides the easiest and fastest time to value. {ess-leadin-short} +You can host {{es}} on your own hardware or send your data to {{es}} on {{ecloud}}. For most users, {{agent}} writing directly to {{es}} on {{ecloud}} provides the easiest and fastest time to value. {{ess-leadin-short}} :::: diff --git a/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md b/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md index 6e96f1f16e..1f1bc336a2 100644 --- a/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md +++ b/manage-data/ingest/transform-enrich/ingest-pipelines-serverless.md @@ -24,7 +24,7 @@ In **{{project-settings}} → {{manage-app}} → {{ingest-pipelines-app}}**, you * Delete pipelines :::{image} ../../../images/serverless-ingest-pipelines-management.png -:alt: {ingest-pipelines-app} +:alt: {{ingest-pipelines-app}} :class: screenshot ::: @@ -38,6 +38,6 @@ The **New pipeline from CSV** option lets you use a file with comma-separated va Before you use a pipeline in production, you should test it using sample documents. When creating or editing a pipeline in **{{ingest-pipelines-app}}**, click **Add documents***. 
In the ***Documents** tab, provide sample documents and click **Run the pipeline**: :::{image} ../../../images/serverless-ingest-pipelines-test.png -:alt: Test a pipeline in {ingest-pipelines-app} +:alt: Test a pipeline in {{ingest-pipelines-app}} :class: screenshot ::: diff --git a/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md b/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md index 2edc6b1a2d..f68181baa1 100644 --- a/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md +++ b/manage-data/lifecycle/data-stream/tutorial-migrate-ilm-managed-data-stream-to-data-stream-lifecycle.md @@ -233,8 +233,8 @@ GET _data_stream/dsl-data-stream } ``` -1. The existing backing index will continue to be managed by {ilm-init} -2. The existing backing index will continue to be managed by {ilm-init} +1. The existing backing index will continue to be managed by {{ilm-init}} +2. The existing backing index will continue to be managed by {{ilm-init}} 3. The next generation index will be managed by Data stream lifecycle 4. The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. @@ -302,8 +302,8 @@ GET _data_stream/dsl-data-stream } ``` -1. The backing indices that existed before rollover will continue to be managed by {ilm-init} -2. The backing indices that existed before rollover will continue to be managed by {ilm-init} +1. The backing indices that existed before rollover will continue to be managed by {{ilm-init}} +2. The backing indices that existed before rollover will continue to be managed by {{ilm-init}} 3. The new write index received the `false` value for the `prefer_ilm` setting, as we configured in the index template 4. The new write index is managed by `Data stream lifecycle` @@ -386,9 +386,9 @@ GET _data_stream/dsl-data-stream } ``` -1. The write index is now managed by {ilm-init} +1. The write index is now managed by {{ilm-init}} 2. The `lifecycle` configured on the data stream is now disabled. -3. The next write index will be managed by {ilm-init} +3. The next write index will be managed by {{ilm-init}} Had we removed the {{ilm-init}} policy from the index template when we [updated](#update-index-template-for-dsl) it, the write index of the data stream will now be `Unmanaged` because the index wouldn’t have the {{ilm-init}} policy configured to fallback onto. 
diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-cpu-usage-exceed-allowed-threshold.md b/raw-migrated-files/cloud/cloud-heroku/ech-cpu-usage-exceed-allowed-threshold.md index b14a5e2973..9c3d0a5416 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-cpu-usage-exceed-allowed-threshold.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-cpu-usage-exceed-allowed-threshold.md @@ -44,7 +44,7 @@ Stack Monitoring comes with out-of-the-box rules, but you need to enable them wh * Machine Learning jobs, watches, monitoring, ingest pipelines - * Internal tasks initiated by {es} + * Internal tasks initiated by {{es}} * Nodes joining and leaving due to hardware failures * Shard allocation due to nodes joining and leaving diff --git a/raw-migrated-files/cloud/cloud-heroku/ech-jvm-heap-usage-exceed-allowed-threshold.md b/raw-migrated-files/cloud/cloud-heroku/ech-jvm-heap-usage-exceed-allowed-threshold.md index 05a612bc11..ca4c39c7da 100644 --- a/raw-migrated-files/cloud/cloud-heroku/ech-jvm-heap-usage-exceed-allowed-threshold.md +++ b/raw-migrated-files/cloud/cloud-heroku/ech-jvm-heap-usage-exceed-allowed-threshold.md @@ -29,7 +29,7 @@ * Machine Learning jobs, watches, monitoring, ingest pipeline - * Internal tasks initiated by {es} + * Internal tasks initiated by {{es}} * Nodes joining and leaving due to hardware failures * Shard allocation due to nodes joining and leaving diff --git a/raw-migrated-files/cloud/cloud/ec-scenario_why_is_my_node_unavailable.md b/raw-migrated-files/cloud/cloud/ec-scenario_why_is_my_node_unavailable.md index 44248cbbbb..bd86bf77e6 100644 --- a/raw-migrated-files/cloud/cloud/ec-scenario_why_is_my_node_unavailable.md +++ b/raw-migrated-files/cloud/cloud/ec-scenario_why_is_my_node_unavailable.md @@ -162,7 +162,7 @@ If your {{es}} cluster is unhealthy and reports a status of red, the scale up co * Machine Learning jobs, watches, monitoring, ingest pipeline - * Internal tasks initiated by {es} + * Internal tasks initiated by {{es}} * Nodes joining and leaving due to hardware failures * Shard allocation due to nodes joining and leaving @@ -241,7 +241,7 @@ Stack Monitoring comes with out-of-the-box rules, but you need to enable them wh * Machine Learning jobs, watches, monitoring, ingest pipelines - * Internal tasks initiated by {es} + * Internal tasks initiated by {{es}} * Nodes joining and leaving due to hardware failures * Shard allocation due to nodes joining and leaving diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md index 22a8348dd2..f86ede2d4e 100644 --- a/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md +++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/kerberos-realm.md @@ -143,7 +143,7 @@ To configure a Kerberos realm in {{es}}: For detailed information of available realm settings, see [Kerberos realm settings](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ref-kerberos-settings). -5. Restart {es} +5. Restart {{es}} 6. Map Kerberos users to roles. The `kerberos` realm enables you to map Kerberos users to roles. You can configure these role mappings by using the [create or update role mappings API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping). You identify users by their `username` field. 
diff --git a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup-https.md b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup-https.md
index a6096862f4..dbddf3539d 100644
--- a/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup-https.md
+++ b/raw-migrated-files/elasticsearch/elasticsearch-reference/security-basic-setup-https.md
@@ -215,7 +215,7 @@ Typically, you need to create the following separate roles:

* **setup** role for setting up index templates and other dependencies
* **monitoring** role for sending monitoring information
-* **writer** role for publishing events collected by {metricbeat}
+* **writer** role for publishing events collected by {{metricbeat}}
* **reader** role for Kibana users who need to view and create visualizations that access {{metricbeat}} data

::::{note}
diff --git a/raw-migrated-files/observability-docs/observability/apm-getting-started-apm-server.md b/raw-migrated-files/observability-docs/observability/apm-getting-started-apm-server.md
index 8c08a3046a..67615e6885 100644
--- a/raw-migrated-files/observability-docs/observability/apm-getting-started-apm-server.md
+++ b/raw-migrated-files/observability-docs/observability/apm-getting-started-apm-server.md
@@ -31,8 +31,8 @@ Fleet is a web-based UI in {{kib}} that is used to centrally manage {{agent}}s.

**Supported outputs**:

-* {es}
-* {ess}
+* {{es}}
+* {{ess}}

::::{note}
Fleet-managed APM Server does *not* support all the outputs that are supported by the APM Server binary method of running Elastic APM.
@@ -63,9 +63,9 @@ Install, configure, and run the APM Server binary wherever you need it.

**Supported outputs**:

-* {es}
+* {{es}}
-* {ess}
+* {{ess}}
-* {ls}
+* {{ls}}
* Kafka
* Redis
* File
@@ -75,7 +75,7 @@

* APM agents
* APM Server
-* {stack}
+* {{stack}}

**Configuration method**: YAML
diff --git a/raw-migrated-files/observability-docs/observability/manage-cases-settings.md b/raw-migrated-files/observability-docs/observability/manage-cases-settings.md
index e7a21f8a6f..c125f7611f 100644
--- a/raw-migrated-files/observability-docs/observability/manage-cases-settings.md
+++ b/raw-migrated-files/observability-docs/observability/manage-cases-settings.md
@@ -19,13 +19,13 @@ To close cases when they are sent to an external system, select **Automatically

If you are using an external incident management system, you can integrate Elastic Observability cases with that system using *connectors*. These third-party systems are supported:

-* {sn-itsm}
-* {sn-sir}
+* {{sn-itsm}}
+* {{sn-sir}}
* {{jira}} (including {{jira}} Service Desk)
-* {ibm-r}
-* {swimlane}
+* {{ibm-r}}
+* {{swimlane}}
* TheHive
-* {webhook-cm}
+* {{webhook-cm}}

::::{important}
To send cases to external systems, you need the appropriate license, and your role must have the **Cases** {{kib}} privilege as a user. For more details, refer to [Configure access to cases](../../../solutions/observability/incident-management/configure-access-to-cases.md).
diff --git a/raw-migrated-files/observability-docs/observability/synthetics-analyze.md b/raw-migrated-files/observability-docs/observability/synthetics-analyze.md index 7e4ee29b04..31d912f9b9 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-analyze.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-analyze.md @@ -49,7 +49,7 @@ When you go to an individual monitor’s page, you’ll see much more detail abo * The **![Pencil icon](../../../images/observability-pencil.svg "") Edit monitor** button that allows you to edit the monitor’s configuration. :::{image} ../../../images/observability-synthetics-analyze-individual-monitor-header.png -:alt: Header at the top of the individual monitor page for all monitor types in the {synthetics-app} +:alt: Header at the top of the individual monitor page for all monitor types in the {{synthetics-app}} :class: screenshot ::: @@ -61,7 +61,7 @@ Each individual monitor’s page has three tabs: Overview, History, and Errors. The **Overview** tab has information about the monitor availability, duration, and any errors that have occurred since the monitor was created. The *Duration trends* chart displays the timing for each check that was performed in the last 30 days. This visualization helps you to gain insights into how quickly requests resolve by the targeted endpoint and gives you a sense of how frequently a host or endpoint was down. :::{image} ../../../images/observability-synthetics-analyze-individual-monitor-details.png -:alt: Details in the Overview tab on the individual monitor page for all monitor types in the {synthetics-app} +:alt: Details in the Overview tab on the individual monitor page for all monitor types in the {{synthetics-app}} :class: screenshot ::: @@ -73,14 +73,14 @@ The **History** tab has information on every time the monitor has run. It includ For browser monitors, you can click on any run in the **Test runs** list to see the details for that run. Read more about what information is included the in [Details for one run](../../../solutions/observability/apps/analyze-data-from-synthetic-monitors.md#synthetics-analyze-one-run) section below. :::{image} ../../../images/observability-synthetics-analyze-individual-monitor-history.png -:alt: The History tab on the individual monitor page for all monitor types in the {synthetics-app} +:alt: The History tab on the individual monitor page for all monitor types in the {{synthetics-app}} :class: screenshot ::: If the monitor is configured to [retest on failure](../../../solutions/observability/apps/configure-synthetics-projects.md#synthetics-configuration-monitor), you’ll see retests listed in the **Test runs** table. Runs that are retests include a rerun icon (![Refresh icon](../../../images/observability-refresh.svg "")) next to the result badge. :::{image} ../../../images/observability-synthetics-retest.png -:alt: A failed run and a retest in the table of test runs in the {synthetics-app} +:alt: A failed run and a retest in the table of test runs in the {{synthetics-app}} :class: screenshot ::: @@ -94,7 +94,7 @@ The Errors tab includes a high-level overview of all alerts and a complete list For browser monitors, you can click on any run in the **Error** list to open an **Error details** page that includes most of the same information that is included the in [Details for one run](../../../solutions/observability/apps/analyze-data-from-synthetic-monitors.md#synthetics-analyze-one-run) section below. 
:::{image} ../../../images/observability-synthetics-analyze-individual-monitor-errors.png -:alt: The Errors tab on the individual monitor page for all monitor types in the {synthetics-app} +:alt: The Errors tab on the individual monitor page for all monitor types in the {{synthetics-app}} :class: screenshot ::: @@ -118,7 +118,7 @@ The journey page on the Overview tab includes: * A list of the **last 10 test runs** that link to the [details for each run](../../../solutions/observability/apps/analyze-data-from-synthetic-monitors.md#synthetics-analyze-one-run). :::{image} ../../../images/observability-synthetics-analyze-journeys-over-time.png -:alt: Individual journey page for browser monitors in the {synthetics-app} +:alt: Individual journey page for browser monitors in the {{synthetics-app}} :class: screenshot ::: @@ -137,7 +137,7 @@ At the top of the page, see the *Code executed* and any *Console* output for eac Navigate through each step using **![Previous icon](../../../images/observability-arrowLeft.svg "") Previous** and **Next ![Next icon](../../../images/observability-arrowRight.svg "")**. :::{image} ../../../images/observability-synthetics-analyze-one-run-code-executed.png -:alt: Step carousel on a page detailing one run of a browser monitor in the {synthetics-app} +:alt: Step carousel on a page detailing one run of a browser monitor in the {{synthetics-app}} :class: screenshot ::: @@ -168,7 +168,7 @@ Customize screenshot behavior for all monitors in the [configuration file](../.. Screenshots can be particularly helpful to identify what went wrong when a step fails because of a change to the UI. You can compare the failed step to the last time the step successfully completed. :::{image} ../../../images/observability-synthetics-analyze-one-step-screenshot.png -:alt: Screenshot for one step in a browser monitor in the {synthetics-app} +:alt: Screenshot for one step in a browser monitor in the {{synthetics-app}} :class: screenshot ::: @@ -190,7 +190,7 @@ Next to each network timing metric, there’s an icon that indicates whether the This gives you an overview of how much time is spent (and how that time is spent) loading resources. This high-level information may not help you diagnose a problem on its own, but it could act as a signal to look at more granular information in the [Network requests](../../../solutions/observability/apps/analyze-data-from-synthetic-monitors.md#synthetics-analyze-one-step-network) section. :::{image} ../../../images/observability-synthetics-analyze-one-step-timing.png -:alt: Network timing visualization for one step in a browser monitor in the {synthetics-app} +:alt: Network timing visualization for one step in a browser monitor in the {{synthetics-app}} :class: screenshot ::: @@ -214,7 +214,7 @@ Largest contentful paint and Cumulative layout shift are part of Google’s [Cor Next to each metric, there’s an icon that indicates whether the value is higher (![Value is higher icon](../../../images/observability-sortUp.svg "")), lower (![Value is lower icon](../../../images/observability-sortDown.svg "")), or the same (![Value is the same](../../../images/observability-minus.svg "")) compared to all runs over the last 24 hours. Hover over the icon to see more details in a tooltip. 
:::{image} ../../../images/observability-synthetics-analyze-one-step-metrics.png -:alt: Metrics visualization for one step in a browser monitor in the {synthetics-app} +:alt: Metrics visualization for one step in a browser monitor in the {{synthetics-app}} :class: screenshot ::: @@ -226,7 +226,7 @@ The **Object weight** visualization shows the cumulative size of downloaded reso This provides a different kind of analysis. For example, you might have a large number of JavaScript files, each of which will need a separate download, but they may be collectively small. This could help you identify an opportunity to improve efficiency by combining multiple files into one. :::{image} ../../../images/observability-synthetics-analyze-one-step-object.png -:alt: Object visualization for one step in a browser monitor in the {synthetics-app} +:alt: Object visualization for one step in a browser monitor in the {{synthetics-app}} :class: screenshot ::: @@ -240,7 +240,7 @@ The colored bars within each line indicate the time spent per resource. Each col Understanding each phase of a request can help you improve your site’s speed by reducing the time spent in each phase. :::{image} ../../../images/observability-synthetics-analyze-one-step-network.png -:alt: Network requests waterfall visualization for one step in a browser monitor in the {synthetics-app} +:alt: Network requests waterfall visualization for one step in a browser monitor in the {{synthetics-app}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/synthetics-get-started-ui.md b/raw-migrated-files/observability-docs/observability/synthetics-get-started-ui.md index a0a3d31cde..ee0922292a 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-get-started-ui.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-get-started-ui.md @@ -62,7 +62,7 @@ To use the {{synthetics-app}} to add a lightweight monitor: If you’ve [added a {{private-location}}](../../../solutions/observability/apps/monitor-resources-on-private-networks.md), you’ll see your new {{private-location}} in the list of *Locations*. :::{image} ../../../images/observability-private-locations-monitor-locations.png -:alt: Screenshot of Monitor locations options including a {private-location} +:alt: Screenshot of Monitor locations options including a {{private-location}} :class: screenshot ::: @@ -93,7 +93,7 @@ To use the {{synthetics-app}} to add a browser monitor: 5. Add steps to the **Script editor** code block directly. The `journey` keyword isn’t required, and variables like `page` and `params` will be part of your script’s scope. You cannot `import` any dependencies when using inline browser monitors. 
:::{image} ../../../images/observability-synthetics-ui-inline-script.png - :alt: Configure a synthetic monitor using an inline script in Elastic {fleet} + :alt: Configure a synthetic monitor using an inline script in Elastic {{fleet}} :class: screenshot ::: diff --git a/raw-migrated-files/observability-docs/observability/synthetics-get-started.md b/raw-migrated-files/observability-docs/observability/synthetics-get-started.md index cb628c4cad..3c290d7f5e 100644 --- a/raw-migrated-files/observability-docs/observability/synthetics-get-started.md +++ b/raw-migrated-files/observability-docs/observability/synthetics-get-started.md @@ -5,8 +5,8 @@ To set up a synthetic monitor, you need to configure the monitor, run it, and se $$$uptime-set-up-choose$$$ There are two ways to set up a synthetic monitor: -* {project-monitors-cap} -* The {synthetics-app} +* {{project-monitors-cap}} +* The {{synthetics-app}} Read more about each option below, and choose the approach that works best for you. diff --git a/raw-migrated-files/security-docs/security/advanced-settings.md b/raw-migrated-files/security-docs/security/advanced-settings.md index ff2d61f246..c7c729e11c 100644 --- a/raw-migrated-files/security-docs/security/advanced-settings.md +++ b/raw-migrated-files/security-docs/security/advanced-settings.md @@ -4,7 +4,7 @@ The advanced settings determine: * Which indices {{elastic-sec}} uses to retrieve data * {{ml-cap}} anomaly score display threshold -* The navigation menu style used throughout the {security-app} +* The navigation menu style used throughout the {{security-app}} * Whether the news feed is displayed on the [*Overview dashboard*](../../../solutions/security/dashboards/overview-dashboard.md) * The default time interval used to filter {{elastic-sec}} pages * The default {{elastic-sec}} pages refresh time diff --git a/raw-migrated-files/security-docs/security/cases-manage-settings.md b/raw-migrated-files/security-docs/security/cases-manage-settings.md index 59a6b63ab5..67bc95edc1 100644 --- a/raw-migrated-files/security-docs/security/cases-manage-settings.md +++ b/raw-migrated-files/security-docs/security/cases-manage-settings.md @@ -24,13 +24,13 @@ To close cases when they are sent to an external system, select **Automatically You can push {{elastic-sec}} cases to these third-party systems: -* {sn-itsm} -* {sn-sir} +* {{sn-itsm}} +* {{sn-sir}} * {{jira}} (including Jira Service Desk) -* {ibm-r} -* {swimlane} -* {hive} -* {webhook-cm} +* {{ibm-r}} +* {{swimlane}} +* {{hive}} +* {{webhook-cm}} To push cases, you need to create a connector, which stores the information required to interact with an external system. After you have created a connector, you can set {{elastic-sec}} cases to automatically close when they are sent to external systems. 
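The connector workflow described in this hunk can also be scripted against {{kib}}'s HTTP API. The following is a minimal sketch, assuming a {{kib}} 8.x endpoint, the `POST /api/actions/connector` route, and a `.jira` connector type; the exact `config` and `secrets` field names vary by connector type and version, so treat them as assumptions to verify, and the URL and credentials are hypothetical placeholders.

```python
import requests

KIBANA_URL = "https://kibana.example.com:5601"  # hypothetical endpoint
AUTH = ("elastic", "<password>")                # hypothetical credentials

# Create a Jira connector that cases can be pushed to. The "kbn-xsrf"
# header is required by Kibana's HTTP API; the config/secrets fields are
# assumptions to check against the Actions API docs for your version.
resp = requests.post(
    f"{KIBANA_URL}/api/actions/connector",
    auth=AUTH,
    headers={"kbn-xsrf": "true"},
    json={
        "name": "jira-cases",
        "connector_type_id": ".jira",
        "config": {"apiUrl": "https://example.atlassian.net", "projectKey": "SEC"},
        "secrets": {"email": "user@example.com", "apiToken": "<api-token>"},
    },
)
resp.raise_for_status()
print(resp.json()["id"])  # connector ID to select in the case settings UI
```

Once the connector exists, it can be selected in the case settings described above, including the option to automatically close cases when they are sent to the external system.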
diff --git a/raw-migrated-files/security-docs/security/cases-overview.md b/raw-migrated-files/security-docs/security/cases-overview.md index be3b052837..d1c99bfccc 100644 --- a/raw-migrated-files/security-docs/security/cases-overview.md +++ b/raw-migrated-files/security-docs/security/cases-overview.md @@ -4,12 +4,12 @@ Collect and share information about security issues by opening a case in {{elast You can also send cases to these external systems by [configuring external connectors](../../../solutions/security/investigate/configure-case-settings.md#cases-ui-integrations): -* {sn-itsm} -* {sn-sir} +* {{sn-itsm}} +* {{sn-sir}} * {{jira}} (including Jira Service Desk) -* {ibm-r} -* {swimlane} -* {webhook-cm} +* {{ibm-r}} +* {{swimlane}} +* {{webhook-cm}} :::{image} ../../../images/security-cases-home-page.png :alt: Case UI Home diff --git a/raw-migrated-files/security-docs/security/connect-to-byo-llm.md b/raw-migrated-files/security-docs/security/connect-to-byo-llm.md index 0399990c73..163ccba3d5 100644 --- a/raw-migrated-files/security-docs/security/connect-to-byo-llm.md +++ b/raw-migrated-files/security-docs/security/connect-to-byo-llm.md @@ -203,7 +203,7 @@ Finally, configure the connector: 8. Click **Save**. :::{image} ../../../images/security-lms-edit-connector.png -:alt: The Edit connector page in the {security-app} +:alt: The Edit connector page in the {{security-app}} ::: Setup is now complete. You can use the model you’ve loaded in LM Studio to power Elastic’s generative AI features. You can test a variety of models as you interact with AI Assistant to see what works best without having to update your connector. diff --git a/raw-migrated-files/security-docs/security/detection-entity-dashboard.md b/raw-migrated-files/security-docs/security/detection-entity-dashboard.md index 71e7335c75..7ced8dc697 100644 --- a/raw-migrated-files/security-docs/security/detection-entity-dashboard.md +++ b/raw-migrated-files/security-docs/security/detection-entity-dashboard.md @@ -94,7 +94,7 @@ To display the **Entities** section, you must [enable the entity store](../../.. The **Entities** section provides a centralized view of all hosts and users in your environment. It displays entities from the [entity store](../../../solutions/security/advanced-entity-analytics/entity-store.md), which meet any of the following criteria: -* Have been observed by {elastic-sec} +* Have been observed by {{elastic-sec}} * Have an asset criticality assignment * Have been added to {{elastic-sec}} through an integration, such as Active Directory or Okta diff --git a/solutions/observability/apps/fleet-managed-apm-server.md b/solutions/observability/apps/fleet-managed-apm-server.md index fc1b787668..87942eb3d2 100644 --- a/solutions/observability/apps/fleet-managed-apm-server.md +++ b/solutions/observability/apps/fleet-managed-apm-server.md @@ -72,7 +72,7 @@ You can install only a single {{agent}} per host, which means you cannot run {{f 5. 
Click the **Agents** tab and follow the in-product instructions to add a {{fleet}} server: :::{image} ../../../images/observability-add-fleet-server.png - :alt: In-product instructions for adding a {fleet-server} + :alt: In-product instructions for adding a {{fleet-server}} :class: screenshot ::: diff --git a/solutions/observability/apps/monitor-fleet-managed-apm-server.md b/solutions/observability/apps/monitor-fleet-managed-apm-server.md index 6a06f2832f..7cd52ecbe5 100644 --- a/solutions/observability/apps/monitor-fleet-managed-apm-server.md +++ b/solutions/observability/apps/monitor-fleet-managed-apm-server.md @@ -42,7 +42,7 @@ Before you can monitor APM, you must have monitoring data for the {{es}} product 2. The host to expose logs/metrics on 3. The port to expose logs/metrics on -2. Enroll {agent} +2. Enroll {{agent}} After editing `elastic-agent.yml`, you must re-enroll {{agent}} for the changes to take effect. diff --git a/solutions/observability/apps/switch-an-elastic-cloud-cluster-to-apm-integration.md b/solutions/observability/apps/switch-an-elastic-cloud-cluster-to-apm-integration.md index 76024b373e..4bbed5a5b3 100644 --- a/solutions/observability/apps/switch-an-elastic-cloud-cluster-to-apm-integration.md +++ b/solutions/observability/apps/switch-an-elastic-cloud-cluster-to-apm-integration.md @@ -29,7 +29,7 @@ With a Superuser account, complete the following steps: 1. In {{kib}}, go to the **Applications** app and click **Settings** → **Schema**. :::{image} ../../../images/observability-schema-agent.png - :alt: switch to {agent} + :alt: switch to {{agent}} ::: 2. Click **Switch to {{agent}}**. Make a note of the `apm-server.yml` user settings that are incompatible with {{agent}}. Check the confirmation box and click **Switch to {{agent}}**. 
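The monitoring hunk above notes that {{agent}} must be re-enrolled after `elastic-agent.yml` is edited. A minimal sketch of scripting that re-enrollment, assuming the `elastic-agent enroll` CLI and its `--url`, `--enrollment-token`, and `--force` flags (verify the flags against your {{agent}} version); the endpoint and token below are hypothetical placeholders.

```python
import subprocess

FLEET_URL = "https://fleet-server.example.com:8220"  # hypothetical Fleet Server endpoint
TOKEN = "<enrollment-token>"  # copied from Fleet > Enrollment tokens

# Re-enroll so the edited elastic-agent.yml takes effect; --force skips
# the interactive confirmation prompt.
subprocess.run(
    ["elastic-agent", "enroll",
     "--url", FLEET_URL,
     "--enrollment-token", TOKEN,
     "--force"],
    check=True,  # raise CalledProcessError if enrollment fails
)
```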
diff --git a/solutions/observability/apps/synthetics-support-matrix.md b/solutions/observability/apps/synthetics-support-matrix.md index 64621d8341..a29c679c94 100644 --- a/solutions/observability/apps/synthetics-support-matrix.md +++ b/solutions/observability/apps/synthetics-support-matrix.md @@ -44,7 +44,7 @@ There are various components that make up the Synthetics solution, which are sup * Any *inline* or *Zip URL* monitors configured with the beta Synthetics integration prior to 1.0.0 are not supported and will stop running in the future * Shipped as the `elastic-agent-complete` Docker image - * Must have a direct connection to {es} + * Must have a direct connection to {{es}} * Do not configure any ingest pipelines or Logstash output @@ -65,7 +65,7 @@ There are various components that make up the Synthetics solution, which are sup * **GA support**: As defined in the standard [Support matrix](https://www.elastic.co/support/matrix) * **Notes**: - * For running lightweight monitors via YML configuration running on self-managed infrastructure with standalone {agent} + * For running lightweight monitors via YML configuration running on self-managed infrastructure with standalone {{agent}} * Browser-based monitors are not supported in this configuration * Results for monitors configured using the standalone {{agent}} are available in the {{uptime-app}} (*not* the {{synthetics-app}}) diff --git a/solutions/observability/apps/tutorial-monitor-java-application.md b/solutions/observability/apps/tutorial-monitor-java-application.md index 3276faf265..33e3c0c9b7 100644 --- a/solutions/observability/apps/tutorial-monitor-java-application.md +++ b/solutions/observability/apps/tutorial-monitor-java-application.md @@ -1461,7 +1461,7 @@ java -jar /tmp/apm-agent-attach-1.17.0-standalone.jar --pid 30730 \ The above command will return something like this: ```text -2020-07-10 15:04:48.144 INFO Attaching the Elastic {apm-agent} to 30730 +2020-07-10 15:04:48.144 INFO Attaching the Elastic {{apm-agent}} to 30730 2020-07-10 15:04:49.649 INFO Done ``` diff --git a/solutions/observability/apps/uptime-monitoring-deprecated.md b/solutions/observability/apps/uptime-monitoring-deprecated.md index 07b9f76e55..7554ee17e0 100644 --- a/solutions/observability/apps/uptime-monitoring-deprecated.md +++ b/solutions/observability/apps/uptime-monitoring-deprecated.md @@ -8,7 +8,7 @@ mapped_pages: ::::{admonition} Deprecated in 8.15.0. 
:class: warning -Use [Synthetic monitoring](synthetic-monitoring.md) instead of the {uptime-app} +Use [Synthetic monitoring](synthetic-monitoring.md) instead of the {{uptime-app}} :::: diff --git a/solutions/observability/apps/use-synthetics-with-traffic-filters.md b/solutions/observability/apps/use-synthetics-with-traffic-filters.md index 115cb21451..aefcd66124 100644 --- a/solutions/observability/apps/use-synthetics-with-traffic-filters.md +++ b/solutions/observability/apps/use-synthetics-with-traffic-filters.md @@ -51,7 +51,7 @@ Once you know the CIDR blocks for your testing sources, add them to your {{es}} For example, if you had a {{private-location}} running with a public CIDR block of `1.2.3.4/32` and were running tests from the `Europe - United Kingdom` region, you would first create a traffic filter with the following: :::{image} ../../../images/observability-synthetics-traffic-filters-create-filter.png -:alt: Create a traffic filter in {cloud} +:alt: Create a traffic filter in {{cloud}} :class: screenshot ::: diff --git a/solutions/observability/cicd.md b/solutions/observability/cicd.md index 6eda11b779..127a9c750d 100644 --- a/solutions/observability/cicd.md +++ b/solutions/observability/cicd.md @@ -47,19 +47,19 @@ The Jenkins health dashboards provide insights on the build executions, the fail :::{image} ../../images/observability-ci-cd-overview.png :alt: CI/CD overview -:title: Jenkins KPIs in Elastic {observability} +:title: Jenkins KPIs in Elastic {{observability}} :class: screenshot ::: :::{image} ../../images/observability-jenkins-kpis.png :alt: Jenkins KPIs -:title: Jenkins Provisioning KPIs in Elastic {observability} +:title: Jenkins Provisioning KPIs in Elastic {{observability}} :class: screenshot ::: :::{image} ../../images/observability-jenkins-jvm-indicators.png :alt: Jenkins JVM health indicators -:title: Jenkins JVM health indicators in Elastic {observability} +:title: Jenkins JVM health indicators in Elastic {{observability}} :class: screenshot ::: @@ -74,7 +74,7 @@ In the following image, a Jenkins CI build failed, and its exceptions are report :::{image} ../../images/observability-jenkins-pipeline-build.png :alt: Jenkins pipeline builds -:title: Jenkins pipeline build error in Elastic {observability} +:title: Jenkins pipeline build error in Elastic {{observability}} :class: screenshot ::: @@ -82,13 +82,13 @@ The Errors overview screen provides a high-level view of the exceptions that CI :::{image} ../../images/observability-jenkins-pipeline-errors.png :alt: Jenkins pipeline build errors -:title: Jenkins jobs and pipelines errors in Elastic {observability} +:title: Jenkins jobs and pipelines errors in Elastic {{observability}} :class: screenshot ::: :::{image} ../../images/observability-concourse-ci-traces.png :alt: Concourse CI traces view -:title: Concourse CI pipeline execution as a trace in Elastic {observability} +:title: Concourse CI pipeline execution as a trace in Elastic {{observability}} :class: screenshot ::: @@ -108,7 +108,7 @@ The Applications Services view in Elastic {{observability}} provides a view of a :::{image} ../../images/observability-jenkins-servers.png :alt: Jenkins servers view -:title: Jenkins servers in Elastic {observability} +:title: Jenkins servers in Elastic {{observability}} :class: screenshot ::: @@ -116,7 +116,7 @@ The Service page provides more granular insights into your CI/CD workflows by br :::{image} ../../images/observability-jenkins-server.png :alt: Jenkins server view -:title: A Jenkins server in Elastic {observability} 
+:title: A Jenkins server in Elastic {{observability}} :class: screenshot ::: @@ -127,7 +127,7 @@ Once you’ve identified the pipeline you want to troubleshoot, you can drill do :::{image} ../../images/observability-jenkins-pipeline-overview.png :alt: Jenkins pipeline overview -:title: Performance overview of a Jenkins pipeline in Elastic {observability} +:title: Performance overview of a Jenkins pipeline in Elastic {{observability}} :class: screenshot ::: @@ -135,7 +135,7 @@ The pipelines and traditional jobs are instrumented automatically. If you spot a :::{image} ../../images/observability-jenkins-pipeline-trace.png :alt: Trace of a Jenkins pipeline build -:title: A Jenkins pipeline build as a trace in Elastic {observability} +:title: A Jenkins pipeline build as a trace in Elastic {{observability}} :class: screenshot ::: @@ -143,7 +143,7 @@ To investigate further, you can view the details of the build captured as labels :::{image} ../../images/observability-jenkins-pipeline-context.png :alt: Attributes of a Jenkins pipeline execution -:title: Contextual attributes of a Jenkins pipeline execution in Elastic {observability} +:title: Contextual attributes of a Jenkins pipeline execution in Elastic {{observability}} :class: screenshot ::: @@ -185,7 +185,7 @@ Visualizing logs both in Elastic and through Jenkins is recommended because it p The Jenkins OpenTelemetry Plugin provides pipeline log storage in {{es}} while enabling you to visualize the logs in {{kib}} and continue to display them through the Jenkins pipeline build console. :::{image} ../../images/observability-ci-cd-visualize-logs-kibana-and-jenkins-console.png -:alt: Jenkins Console Output page displaying both log contents and a link to view logs in Elastic {observability} +:alt: Jenkins Console Output page displaying both log contents and a link to view logs in Elastic {{observability}} :class: screenshot ::: @@ -202,7 +202,7 @@ This more advanced setup requires connecting the Jenkins Controller to {{es}} wi Visualizing logs exclusively in {{kib}} involves a simpler setup that doesn’t require access to {{es}} from the Jenkins Controller. This is because the Jenkins pipeline build console displays a hyperlink to the {{kib}} logs visualization screen instead of displaying the logs in the Jenkins UI. 
:::{image} ../../images/observability-ci-cd-visualize-logs-kibana-console.png -:alt: Jenkins Console Output page with link to view logs in Elastic {observability} +:alt: Jenkins Console Output page with link to view logs in Elastic {{observability}} :class: screenshot ::: @@ -459,7 +459,7 @@ make login build push :::{image} ../../images/observability-jenkins-makefile.png :alt: Jenkins build executing an instrumented Makefile -:title: A Jenkins build executing a Makefile instrumented with the otel-cli in Elastic {observability} +:title: A Jenkins build executing a Makefile instrumented with the otel-cli in Elastic {{observability}} :class: screenshot ::: @@ -505,7 +505,7 @@ Once Concourse CI tracing is configured, Concourse CI pipeline executions are re :::{image} ../../images/observability-jenkins-concourse.png :alt: Concourse CI pipeline execution -:title: A Concourse CI pipeline execution in Elastic {observability} +:title: A Concourse CI pipeline execution in Elastic {{observability}} :class: screenshot ::: diff --git a/solutions/observability/cloud/gcp-dataflow-templates.md b/solutions/observability/cloud/gcp-dataflow-templates.md index e7e4f32e0e..eab6ba5a2a 100644 --- a/solutions/observability/cloud/gcp-dataflow-templates.md +++ b/solutions/observability/cloud/gcp-dataflow-templates.md @@ -114,7 +114,7 @@ After creating a Pub/Sub topic and subscription, go to the **Dataflow Jobs** pag To create a job, click **Create Job From Template**. Set **Job name** as `auditlogs-stream` and select `Pub/Sub to Elasticsearch` from the **Dataflow template** dropdown menu: :::{image} ../../../images/observability-monitor-gcp-dataflow-pub-sub-elasticsearch.png -:alt: GCP Dataflow Pub/Sub to {es} +:alt: GCP Dataflow Pub/Sub to {{es}} ::: Before running the job, fill in required parameters: diff --git a/solutions/observability/infra-and-hosts/tutorial-observe-kubernetes-deployments.md b/solutions/observability/infra-and-hosts/tutorial-observe-kubernetes-deployments.md index ca410b0739..6255a8b5a9 100644 --- a/solutions/observability/infra-and-hosts/tutorial-observe-kubernetes-deployments.md +++ b/solutions/observability/infra-and-hosts/tutorial-observe-kubernetes-deployments.md @@ -25,7 +25,7 @@ This guide describes how to use Elastic {{observability}} to observe all layers * Collect logs and metrics from Kubernetes and your applications * Collect trace data from applications deployed with Kubernetes -* Centralize the data in the {stack} +* Centralize the data in the {{stack}} * Explore the data in real-time using tailored dashboards and {{observability}} UIs This guide describes how to deploy Elastic monitoring agents as DaemonSets using the {{agent}} manifest files. For other deployment options, see the Kubernetes operator and custom resource definitions from [{{ecloud}} on Kubernetes (ECK)](https://www.elastic.co/guide/en/cloud-on-k8s/current/index.html). diff --git a/solutions/observability/infra-and-hosts/upgrade-universal-profiling.md b/solutions/observability/infra-and-hosts/upgrade-universal-profiling.md index 381c2119ee..716ec98e1d 100644 --- a/solutions/observability/infra-and-hosts/upgrade-universal-profiling.md +++ b/solutions/observability/infra-and-hosts/upgrade-universal-profiling.md @@ -18,7 +18,7 @@ For self-hosted installations, refer to [Upgrade Universal Profiling in self-hos To upgrade from all versions earlier than 8.10 (GA), you need to: -1. Perform a stack upgrade in {ecloud} +1. Perform a stack upgrade in {{ecloud}} 2. Stop profiling data ingestion 3. 
Delete existing profiling data 4. Set up Universal Profiling from scratch @@ -26,7 +26,7 @@ To upgrade from all versions earlier than 8.10 (GA), you need to: To upgrade from version 8.10 or later, you need to: -1. Perform a stack upgrade in {ecloud} +1. Perform a stack upgrade in {{ecloud}} ### Perform a stack upgrade in the {{ecloud}} console [profiling-upgrade-in-cloud] diff --git a/solutions/search/ranking/learning-to-rank-model-training.md b/solutions/search/ranking/learning-to-rank-model-training.md index c14b723794..3b491d5dee 100644 --- a/solutions/search/ranking/learning-to-rank-model-training.md +++ b/solutions/search/ranking/learning-to-rank-model-training.md @@ -28,7 +28,7 @@ We highly recommend using [eland](https://eland.readthedocs.io/) in your workflo * Configure feature extraction * Extract features for training -* Deploy the model in {es} +* Deploy the model in {{es}} ### Configure feature extraction in Eland [learning-to-rank-model-training-feature-definition] diff --git a/solutions/search/ranking/semantic-reranking.md b/solutions/search/ranking/semantic-reranking.md index dcc92e2dbe..011a8f4119 100644 --- a/solutions/search/ranking/semantic-reranking.md +++ b/solutions/search/ranking/semantic-reranking.md @@ -138,5 +138,5 @@ POST _search * Read the [retriever reference documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search#operation-search-body-application-json-retriever) for syntax and implementation details * Learn more about the [retrievers](../querying-for-search.md) abstraction * Learn more about the Elastic [Inference APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-inference) -* Check out our [Python notebook](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb) for using Cohere with {es} +* Check out our [Python notebook](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb) for using Cohere with {{es}} diff --git a/solutions/search/search-applications/search-application-security.md b/solutions/search/search-applications/search-application-security.md index ffa5f556ee..4cff7fba00 100644 --- a/solutions/search/search-applications/search-application-security.md +++ b/solutions/search/search-applications/search-application-security.md @@ -21,9 +21,9 @@ This guide describes best practices when taking the second approach. Specificall This approach has a few advantages: -* No need to maintain a passthrough query system between frontend applications and {es} +* No need to maintain a passthrough query system between frontend applications and {{es}} * Direct requests to {{es}} result in faster response times -* Query configuration is managed in one place: your search application configuration in {es} +* Query configuration is managed in one place: your search application configuration in {{es}} We will cover: diff --git a/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md b/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md index 33c3264673..31baef4b97 100644 --- a/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md +++ b/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md @@ -35,7 +35,7 @@ In order to get the shards assigned we need to call the [migrate to data tiers r :class: screenshot ::: -4. First, let’s [stop](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) {ilm} +4. 
First, let’s [stop](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) {{ilm}} ```console POST /_ilm/stop @@ -88,7 +88,7 @@ In order to get the shards assigned we need to call the [migrate to data tiers r 4. The composable index templates that were updated to not contain custom routing settings for the provided data attribute. 5. The component templates that were updated to not contain custom routing settings for the provided data attribute. -7. [Restart](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) {ilm} +7. [Restart](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) {{ilm}} ```console POST /_ilm/start @@ -112,7 +112,7 @@ In order to get the shards assigned we need to make sure the deployment is using node.roles [ data_hot, data_content ] ``` -2. [Stop](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) {ilm} +2. [Stop](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) {{ilm}} ```console POST /_ilm/stop @@ -165,7 +165,7 @@ In order to get the shards assigned we need to make sure the deployment is using 4. The composable index templates that were updated to not contain custom routing settings for the provided data attribute. 5. The component templates that were updated to not contain custom routing settings for the provided data attribute. -5. [Restart](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) {ilm} +5. [Restart](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) {{ilm}} ```console POST /_ilm/start diff --git a/troubleshoot/ingest/fleet/common-problems.md b/troubleshoot/ingest/fleet/common-problems.md index 5d5bcba810..29b47a7db3 100644 --- a/troubleshoot/ingest/fleet/common-problems.md +++ b/troubleshoot/ingest/fleet/common-problems.md @@ -249,7 +249,7 @@ You will also need to set `ssl.verification_mode: none` in the Output settings i To enroll in {{fleet}}, {{agent}} must connect to the {{fleet-server}} instance. If the agent is unable to connect, you see the following failure: ```txt -fail to enroll: fail to execute request to {fleet-server}:Post http://fleet-server:8220/api/fleet/agents/enroll?: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) +fail to enroll: fail to execute request to {{fleet-server}}:Post http://fleet-server:8220/api/fleet/agents/enroll?: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) ``` Here are several steps to help you troubleshoot the problem. 
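For the enrollment timeout shown in this hunk, one of the first troubleshooting steps is confirming that the host can reach {{fleet-server}} at all. A minimal sketch, assuming the default port `8220` and {{fleet-server}}'s `/api/status` endpoint (verify both for your deployment):

```python
import requests

FLEET_SERVER = "http://fleet-server:8220"  # host/port from the enrollment error

try:
    # A 200 response with a healthy status suggests the network path is
    # fine and the problem lies elsewhere (token, TLS settings, proxies).
    resp = requests.get(f"{FLEET_SERVER}/api/status", timeout=10)
    print(resp.status_code, resp.text)
except requests.exceptions.RequestException as err:
    print(f"Cannot reach fleet-server: {err}")  # DNS, firewall, or port issue
```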
diff --git a/troubleshoot/kibana/alerts.md b/troubleshoot/kibana/alerts.md index c0958e34e2..762cd0a566 100644 --- a/troubleshoot/kibana/alerts.md +++ b/troubleshoot/kibana/alerts.md @@ -199,7 +199,7 @@ This approach should be used only temporarily as a last resort to restore functi ## Limitations [alerting-limitations] -The following limitations and known problems apply to the 9.0.0-beta1 release of the {{kib}} {alert-features}: +The following limitations and known problems apply to the 9.0.0-beta1 release of the {{kib}} {{alert-features}}: ### Alert visibility [_alert_visibility] diff --git a/troubleshoot/kibana/task-manager.md b/troubleshoot/kibana/task-manager.md index 14990fb845..4c756f2f34 100644 --- a/troubleshoot/kibana/task-manager.md +++ b/troubleshoot/kibana/task-manager.md @@ -924,7 +924,7 @@ For details on scaling Task Manager, see [Scaling guidance](../../deploy-manage/ Tasks are not running, and the server logs contain the following error message: ```txt -[warning][plugins][taskManager] Task Manager cannot operate when inline scripts are disabled in {es} +[warning][plugins][taskManager] Task Manager cannot operate when inline scripts are disabled in {{es}} ``` **Solution**: diff --git a/troubleshoot/observability/inspect.md b/troubleshoot/observability/inspect.md index 0d1c7ac34f..0e41319ebb 100644 --- a/troubleshoot/observability/inspect.md +++ b/troubleshoot/observability/inspect.md @@ -8,7 +8,7 @@ mapped_pages: The **Inspect** view in {{kib}} allows you to view information about all requests that were made to collect the data displayed on the current page. :::{image} ../../images/observability-inspect-flyout.png -:alt: Inspector flyout in the {uptime-app} +:alt: Inspector flyout in the {{uptime-app}} :class: screenshot :::
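Every hunk in this patch applies the same mechanical rewrite: a single-braced substitution key such as `{es}` becomes `{{es}}`, while already-doubled keys are left alone. A sketch of how that rewrite could be automated in a hypothetical `fix_subs.py`, assuming keys contain only lowercase letters, digits, and hyphens; the hunks above show the doubling being applied even inside fenced `txt`/`text` blocks, so this sketch deliberately does not skip code fences.

```python
import re
import sys
from pathlib import Path

# Match {key} but not {{key}} or {key}}: the lookarounds reject a brace
# that is already doubled on either side.
SINGLE_BRACE = re.compile(r"(?<!\{)\{([a-z0-9-]+)\}(?!\})")

def double_braces(text: str) -> str:
    """Rewrite every single-braced substitution key to double braces."""
    return SINGLE_BRACE.sub(r"{{\1}}", text)

# Usage: python fix_subs.py path/to/docs/*.md
for path in map(Path, sys.argv[1:]):
    original = path.read_text(encoding="utf-8")
    updated = double_braces(original)
    if updated != original:
        path.write_text(updated, encoding="utf-8")
        print(f"fixed {path}")
```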