From 041dde5c5ffe28587865c4daba57d7ec31d2c708 Mon Sep 17 00:00:00 2001 From: Fabrizio Ferri Benedetti Date: Thu, 10 Apr 2025 17:53:58 +0200 Subject: [PATCH 1/4] Remove pleases --- README.md | 2 +- .../cloud-organization/billing/billing-faq.md | 4 ++-- .../alternative-install-ece-with-ansible.md | 2 +- .../deploy/cloud-enterprise/configure-host-rhel.md | 4 ++-- deploy-manage/deploy/cloud-enterprise/ece-ha.md | 2 +- .../deploy/cloud-enterprise/ece-hardware-prereq.md | 2 +- .../deploy/cloud-enterprise/ece-wildcard-dns.md | 2 +- .../cloud-enterprise/migrate-ece-to-podman-hosts.md | 4 ++-- .../deploy/cloud-enterprise/migrate-to-podman-5.md | 2 +- .../deploy/cloud-on-k8s/configuration-fleet.md | 2 +- .../deploy/cloud-on-k8s/pod-prestop-hook.md | 4 ++-- .../deploy/cloud-on-k8s/troubleshooting-beats.md | 2 +- .../elastic-cloud/azure-marketplace-pricing.md | 2 +- .../deploy/elastic-cloud/azure-native-isv-service.md | 8 ++++---- deploy-manage/deploy/elastic-cloud/cloud-hosted.md | 2 +- ...-pay-as-you-go-subscription-on-aws-marketplace.md | 2 +- ...-pay-as-you-go-subscription-on-gcp-marketplace.md | 2 +- .../google-cloud-platform-marketplace.md | 4 ++-- .../elastic-cloud/restrictions-known-problems.md | 4 ++-- .../elastic-cloud/upload-custom-plugins-bundles.md | 2 +- .../install-elasticsearch-docker-basic.md | 2 +- .../install-elasticsearch-with-docker.md | 2 +- .../self-managed/install-elasticsearch-with-rpm.md | 2 +- deploy-manage/monitor/autoops/ec-autoops-faq.md | 2 +- .../monitor/kibana-task-manager-health-monitoring.md | 2 +- .../orchestrators/ece-monitoring-ece-access.md | 2 +- .../kibana-in-production-environments.md | 2 +- deploy-manage/security/elastic-cloud-static-ips.md | 2 +- deploy-manage/security/fips-140-2.md | 2 +- .../logfile-audit-events-ignore-policies.md | 2 +- .../manage-security-certificates.md | 2 +- deploy-manage/security/traffic-filtering.md | 2 +- .../snapshot-and-restore/elastic-cloud-hosted.md | 2 +- .../repository-isolation-on-aws-gcp.md | 2 +- .../repository-isolation-on-azure.md | 2 +- .../tools/snapshot-and-restore/s3-repository.md | 4 ++-- .../tools/snapshot-and-restore/self-managed.md | 2 +- deploy-manage/uninstall/delete-a-cloud-deployment.md | 2 +- .../deployment-or-cluster/saved-object-migrations.md | 2 +- .../upgrade/orchestrator/upgrade-cloud-enterprise.md | 2 +- .../cluster-or-deployment-auth/built-in-roles.md | 4 ++-- explore-analyze/ai-assistant.md | 4 ++-- explore-analyze/alerts-cases/watcher/actions-jira.md | 2 +- .../alerts-cases/watcher/actions-pagerduty.md | 4 ++-- .../alerts-cases/watcher/actions-slack.md | 2 +- explore-analyze/alerts-cases/watcher/actions.md | 2 +- explore-analyze/index.md | 6 +++--- .../anomaly-detection/anomaly-detection-scale.md | 4 ++-- .../anomaly-detection/ml-getting-started.md | 2 +- .../data-frame-analytics/ml-trained-models.md | 2 +- .../machine-learning/nlp/ml-nlp-limitations.md | 2 +- .../languages/sql-client-apps-dbeaver.md | 2 +- .../query-filter/languages/sql-client-apps-excel.md | 2 +- .../languages/sql-client-apps-microstrat.md | 2 +- .../languages/sql-client-apps-powerbi.md | 2 +- .../query-filter/languages/sql-client-apps-ps1.md | 2 +- .../query-filter/languages/sql-client-apps-qlik.md | 2 +- .../languages/sql-client-apps-squirrel.md | 2 +- .../languages/sql-client-apps-tableau-desktop.md | 2 +- .../languages/sql-client-apps-tableau-server.md | 2 +- .../languages/sql-client-apps-workbench.md | 2 +- .../query-filter/languages/sql-client-apps.md | 2 +- .../query-filter/languages/sql-concepts.md | 2 +- 
explore-analyze/transforms/transform-limitations.md | 4 ++-- ...o-elasticsearch-service-with-logstash-as-proxy.md | 2 +- manage-data/ingest/upload-data-files.md | 2 +- manage-data/lifecycle/rollup.md | 2 +- manage-data/lifecycle/rollup/getting-started-api.md | 2 +- .../rollup/rollup-aggregation-limitations.md | 2 +- .../lifecycle/rollup/rollup-search-limitations.md | 2 +- manage-data/lifecycle/rollup/understanding-groups.md | 2 +- reference/fleet/agent-policy.md | 2 +- reference/fleet/air-gapped.md | 4 ++-- reference/fleet/data-streams-scenario3.md | 2 +- reference/fleet/elastic-agent-inputs-list.md | 2 +- reference/fleet/fleet-agent-proxy-managed.md | 4 ++-- reference/fleet/index.md | 2 +- reference/fleet/kafka-output-settings.md | 4 ++-- reference/fleet/kafka-output.md | 2 +- reference/fleet/logstash-output.md | 2 +- reference/fleet/ls-output-settings.md | 2 +- reference/fleet/manage-integrations.md | 2 +- reference/fleet/migrate-auditbeat-to-agent.md | 2 +- reference/fleet/running-on-aks-managed-by-fleet.md | 2 +- reference/fleet/running-on-gke-managed-by-fleet.md | 2 +- reference/fleet/scaling-on-kubernetes.md | 2 +- reference/observability/fields-and-object-schemas.md | 2 +- solutions/observability/apm/collect-metrics.md | 2 +- .../observability/apm/configure-kibana-endpoint.md | 2 +- .../observability/apm/configure-project-paths.md | 2 +- solutions/observability/apm/transaction-sampling.md | 2 +- ...pstream-opentelemetry-collectors-language-sdks.md | 2 +- .../monitor-amazon-web-services-aws-with-beats.md | 2 +- .../cloud/monitor-google-cloud-platform-gcp.md | 2 +- .../cloud/monitor-microsoft-azure-with-beats.md | 2 +- .../semantic-search/semantic-search-inference.md | 2 +- solutions/search/site-or-app/search-ui.md | 6 +++--- solutions/search/vector/knn.md | 2 +- solutions/security/ai/ai-assistant.md | 4 ++-- .../detect-and-alert/create-detection-rule.md | 2 +- troubleshoot/elasticsearch/add-tier.md | 2 +- .../elasticsearch/allow-all-cluster-allocation.md | 2 +- .../elasticsearch/allow-all-index-allocation.md | 2 +- .../elasticsearch/decrease-disk-usage-data-node.md | 4 ++-- .../elasticsearch/diagnose-unassigned-shards.md | 6 +++--- .../diagnosing-corrupted-repositories.md | 2 +- .../missing-required-property.md | 4 ++-- .../elasticsearch/increase-capacity-data-node.md | 4 ++-- .../elasticsearch/increase-cluster-shard-limit.md | 2 +- troubleshoot/elasticsearch/increase-shard-limit.md | 2 +- troubleshoot/elasticsearch/increase-tier-capacity.md | 2 +- troubleshoot/elasticsearch/mapping-explosion.md | 2 +- troubleshoot/elasticsearch/remote-clusters.md | 4 ++-- .../elasticsearch/repeated-snapshot-failures.md | 2 +- troubleshoot/elasticsearch/restore-from-snapshot.md | 4 ++-- .../elasticsearch/security/trb-security-kerberos.md | 8 ++++---- .../elasticsearch/security/trb-security-saml.md | 2 +- .../elasticsearch/security/trb-security-setup.md | 4 ++-- .../elasticsearch/security/trb-security-ssl.md | 8 ++++---- troubleshoot/elasticsearch/start-ilm.md | 4 ++-- .../elasticsearch/troubleshoot-migrate-to-tiers.md | 2 +- .../troubleshooting-shards-capacity-issues.md | 4 ++-- troubleshoot/ingest/elastic-serverless-forwarder.md | 2 +- troubleshoot/ingest/fleet/common-problems.md | 10 +++++----- troubleshoot/ingest/logstash.md | 6 +++--- troubleshoot/kibana/migration-failures.md | 4 ++-- troubleshoot/kibana/task-manager.md | 2 +- troubleshoot/observability/amazon-data-firehose.md | 2 +- .../observability/apm-agent-dotnet/apm-net-agent.md | 4 ++-- .../observability/apm-agent-go/apm-go-agent.md | 4 
++-- .../observability/apm-agent-java/apm-java-agent.md | 12 ++++++------ .../apm-agent-nodejs/apm-nodejs-agent.md | 6 +++--- .../observability/apm-agent-php/apm-php-agent.md | 4 ++-- .../apm-real-user-monitoring-javascript-agent.md | 4 ++-- .../observability/apm-agent-swift/apm-ios-agent.md | 4 ++-- .../apm/_agent_is_not_instrumenting_code.md | 2 +- .../apm/_collection_of_diagnostic_information.md | 2 +- troubleshoot/observability/apm/common-problems.md | 4 ++-- troubleshoot/observability/troubleshoot-logs.md | 2 +- ...hoot-your-universal-profiling-agent-deployment.md | 4 ++-- troubleshoot/security/detection-rules.md | 2 +- 141 files changed, 201 insertions(+), 201 deletions(-) diff --git a/README.md b/README.md index 5f35bc5277..ab787ad3d6 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ This repo contains source files for Elastic documentation. ## Contribute -If you find any bugs in our documentation, or want to request an enhancement, please [open an issue](https://github.com/elastic/docs-content/issues). We also welcome contributions in the form of PRs. Before you submit a PR, make sure that you have signed our [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). +If you find any bugs in our documentation, or want to request an enhancement, [open an issue](https://github.com/elastic/docs-content/issues). We also welcome contributions in the form of PRs. Before you submit a PR, make sure that you have signed our [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). We write our docs in markdown. See our [syntax guide](https://elastic.github.io/docs-builder/syntax/index.html) for examples and additional functionality. diff --git a/deploy-manage/cloud-organization/billing/billing-faq.md b/deploy-manage/cloud-organization/billing/billing-faq.md index e433111e59..7c994be6f4 100644 --- a/deploy-manage/cloud-organization/billing/billing-faq.md +++ b/deploy-manage/cloud-organization/billing/billing-faq.md @@ -69,7 +69,7 @@ $$$faq-payment$$$What are the available payment methods on {{ecloud}}? : For month-to-month payments only credit cards are accepted. We also allow payments by bank transfer for annual subscriptions. $$$faq-contact$$$Who can I contact for more information? -: If you have any further questions about your credit card statement, billing, or receipts, please send an email to `ar@elastic.co` or open a [Support case](../../../troubleshoot/index.md) using the *Billing issue* category. +: If you have any further questions about your credit card statement, billing, or receipts, send an email to `ar@elastic.co` or open a [Support case](../../../troubleshoot/index.md) using the *Billing issue* category. $$$faq-charge$$$Why is my credit card charged? : If you are on a monthly plan, the charge is a recurring fee for using {{ecloud}}. The fee is normally charged at the start of each month, but it can also be charged at other times during the month. If a charge is unsuccessful, we will try to charge your card again at a later date. @@ -90,7 +90,7 @@ $$$faq-deleteaccount$$$How can I delete my {{ecloud}} account? : To have your account removed, you can contact support through the {{ecloud}} [Support form](https://cloud.elastic.co/support?page=docs&placement=docs-body) or use one of these [alternative contact methods](../../../troubleshoot/index.md). 
For details about our data erasure policy, check [Privacy Rights and Choices](https://www.elastic.co/legal/privacy-statement#privacy-rights-and-choices?page=docs&placement=docs-body) in our General Privacy Statement. $$$faq-refund$$$Can I get a refund? -: Charges are non-refundable, but once you delete a deployment we’ll stop charging you for that deployment immediately. You only pay for what you use and you can stop using the service at any time. For any special considerations warranting a potential refund, please use the {{ecloud}} Console [Support form](https://cloud.elastic.co/support?page=docs&placement=docs-body) to open a support case and select *Billing issue* as the category. To ensure quick processing, be sure to provide detail about the reasons for the refund request as well as other matters pertaining to the issue. For other ways to open a Support case, check [Contact us](../../../troubleshoot/index.md). +: Charges are non-refundable, but once you delete a deployment we’ll stop charging you for that deployment immediately. You only pay for what you use and you can stop using the service at any time. For any special considerations warranting a potential refund, use the {{ecloud}} Console [Support form](https://cloud.elastic.co/support?page=docs&placement=docs-body) to open a support case and select *Billing issue* as the category. To ensure quick processing, be sure to provide detail about the reasons for the refund request as well as other matters pertaining to the issue. For other ways to open a Support case, check [Contact us](../../../troubleshoot/index.md). $$$faq-included$$$What is included in my paid {{ech}} deployment? : All subscription tiers for {{ech}} include the following free allowance: diff --git a/deploy-manage/deploy/cloud-enterprise/alternative-install-ece-with-ansible.md b/deploy-manage/deploy/cloud-enterprise/alternative-install-ece-with-ansible.md index 587aa6f4eb..49c4b7cf54 100644 --- a/deploy-manage/deploy/cloud-enterprise/alternative-install-ece-with-ansible.md +++ b/deploy-manage/deploy/cloud-enterprise/alternative-install-ece-with-ansible.md @@ -11,5 +11,5 @@ navigation_title: Ansible playbook If you already use Ansible in your business for provisioning, configuration management, and application deployment, you can use the ECE Ansible playbook to get up and running with {{ece}} faster, on any cloud provider. -Please note that the ECE Ansible playbook is a community project, supported by Elastic, available on GitHub: [elastic/ansible-elastic-cloud-enterprise](https://github.com/elastic/ansible-elastic-cloud-enterprise). Elastic welcomes all community contributions to the repository and will validate any changes on a best-effort basis. +Note that the ECE Ansible playbook is a community project, supported by Elastic, available on GitHub: [elastic/ansible-elastic-cloud-enterprise](https://github.com/elastic/ansible-elastic-cloud-enterprise). Elastic welcomes all community contributions to the repository and will validate any changes on a best-effort basis. diff --git a/deploy-manage/deploy/cloud-enterprise/configure-host-rhel.md b/deploy-manage/deploy/cloud-enterprise/configure-host-rhel.md index c2c2779794..7120e27e6d 100644 --- a/deploy-manage/deploy/cloud-enterprise/configure-host-rhel.md +++ b/deploy-manage/deploy/cloud-enterprise/configure-host-rhel.md @@ -139,7 +139,7 @@ Verify that required traffic is allowed. Check the [Networking prerequisites](ec [...] ``` -6. 
If podman requires a proxy in your infrastructure setup, modify the `/usr/share/containers/containers.conf` file and add the `HTTP_PROXY` and `HTTPS_PROXY` environment variables in the [engine] section. Please note that multiple env variables in that configuration file exists — use the one in the [engine] section. +6. If podman requires a proxy in your infrastructure setup, modify the `/usr/share/containers/containers.conf` file and add the `HTTP_PROXY` and `HTTPS_PROXY` environment variables in the [engine] section. Note that multiple env variables in that configuration file exists — use the one in the [engine] section. Example: @@ -304,7 +304,7 @@ Verify that required traffic is allowed. Check the [Networking prerequisites](ec sudo install -o elastic -g elastic -d -m 700 /mnt/data/docker ``` -25. If you want to use FirewallD, please ensure you meet the [networking prerequisites](ece-networking-prereq.md). Otherwise, you can disable it with: +25. If you want to use FirewallD, ensure you meet the [networking prerequisites](ece-networking-prereq.md). Otherwise, you can disable it with: ```sh sudo systemctl disable firewalld diff --git a/deploy-manage/deploy/cloud-enterprise/ece-ha.md b/deploy-manage/deploy/cloud-enterprise/ece-ha.md index 91125e6a33..fadbdf8531 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-ha.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-ha.md @@ -57,4 +57,4 @@ If you’re using a [private Docker registry server](ece-install-offline-with-re Avoid deleting containers unless explicitly instructed by Elastic Support or official documentation. Doing so may lead to unexpected issues or loss of access to your {{ece}} platform. For more details, refer to [](/troubleshoot/deployments/cloud-enterprise/troubleshooting-container-engines.md). -If in doubt, please [contact support for help](/troubleshoot/index.md#contact-us). +If in doubt, [contact support for help](/troubleshoot/index.md#contact-us). diff --git a/deploy-manage/deploy/cloud-enterprise/ece-hardware-prereq.md b/deploy-manage/deploy/cloud-enterprise/ece-hardware-prereq.md index e190ec20de..911f341652 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-hardware-prereq.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-hardware-prereq.md @@ -61,5 +61,5 @@ The size of your ECE deployment has a bearing on the JVM heap sizes that you sho The ECE management services provided by the coordinators and directors require fast SSD storage to work correctly. For smaller deployments that co-locate the ECE management services with proxies and allocators on the same hosts, you must use fast SSD storage for your entire deployment. If SSD-only storage is not feasible, [some of the ECE management services need to be separated](ece-roles.md). ::::{note} -When using SSDs on an external (shared) storage system, please check with your storage vendor whether TRIM [should be disabled](https://www.elastic.co/blog/is-your-elasticsearch-trimmed) on the ECE hosts to avoid unnecessary stress on the storage system. +When using SSDs on an external (shared) storage system, check with your storage vendor whether TRIM [should be disabled](https://www.elastic.co/blog/is-your-elasticsearch-trimmed) on the ECE hosts to avoid unnecessary stress on the storage system. 
:::: diff --git a/deploy-manage/deploy/cloud-enterprise/ece-wildcard-dns.md b/deploy-manage/deploy/cloud-enterprise/ece-wildcard-dns.md index 9cc136bdad..f21de8ae8d 100644 --- a/deploy-manage/deploy/cloud-enterprise/ece-wildcard-dns.md +++ b/deploy-manage/deploy/cloud-enterprise/ece-wildcard-dns.md @@ -9,7 +9,7 @@ mapped_pages: # Wildcard DNS record [ece-wildcard-dns] ::::{warning} -We do not recommend using `ip.es.io` for production systems. Please set up your own domain name and DNS resolver for production. We do not guarantee uptime with `ip.es.io`. +Don't use `ip.es.io` for production systems. Set up your own domain name and DNS resolver for production. We do not guarantee uptime with `ip.es.io`. :::: By default, {{ece}} uses the external `ip.es.io` service provided by Elastic to resolve virtual {{es}} cluster host names in compliance with RFC1918. The service works by resolving host names of the form `.ip.es.io` to ``. In the case of {{ece}}, each cluster is assigned a virtual host name of the form `..ip.es.io:`, such as `6dfc65aae62341e18a8b7692dcc97186.10.8.156.132.ip.es.io:9243`. The `ip.es.io` service simply resolves the virtual host name of the cluster to the proxy address which is specified during installation, `10.8.156.132` in our example, so that client requests are sent to the proxy. The proxy then extracts the cluster ID from the virtual host name of the cluster and uses its internal routing table to route the request to the right allocator. diff --git a/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md b/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md index 9331012b63..a3bfd2a1c0 100644 --- a/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md +++ b/deploy-manage/deploy/cloud-enterprise/migrate-ece-to-podman-hosts.md @@ -169,7 +169,7 @@ Using Docker or Podman as container runtime is a configuration local to the host [...] ``` -6. If podman requires a proxy in your infrastructure setup, modify the `/usr/share/containers/containers.conf` file and add the `HTTP_PROXY` and `HTTPS_PROXY` environment variables in the [engine] section. Please note that multiple env variables in that configuration file exists — use the one in the [engine] section. +6. If podman requires a proxy in your infrastructure setup, modify the `/usr/share/containers/containers.conf` file and add the `HTTP_PROXY` and `HTTPS_PROXY` environment variables in the [engine] section. Note that multiple env variables in that configuration file exists — use the one in the [engine] section. Example: @@ -334,7 +334,7 @@ Using Docker or Podman as container runtime is a configuration local to the host sudo install -o elastic -g elastic -d -m 700 /mnt/data/docker ``` -25. If you want to use FirewallD, please ensure you meet the [networking prerequisites](ece-networking-prereq.md). Otherwise, you can disable it with: +25. If you want to use FirewallD, ensure you meet the [networking prerequisites](ece-networking-prereq.md). Otherwise, you can disable it with: ```sh sudo systemctl disable firewalld diff --git a/deploy-manage/deploy/cloud-enterprise/migrate-to-podman-5.md b/deploy-manage/deploy/cloud-enterprise/migrate-to-podman-5.md index a17ad2e73c..1eb8a3364a 100644 --- a/deploy-manage/deploy/cloud-enterprise/migrate-to-podman-5.md +++ b/deploy-manage/deploy/cloud-enterprise/migrate-to-podman-5.md @@ -23,7 +23,7 @@ Following are the supported upgrade paths for Podman 5 in {{ece}}. 
Podman `5.2.2-13` is only supported when conducting a **fresh {{ece}} installation** or performing a **grow-and-shrink update** from Docker or Podman 4. For **in-place updates**, it is recommended to use Podman `5.2.2-9`, since upgrades to versions `5.2.2-11` and `5.2.2-13` are affected by a known [memory leak issue](https://github.com/containers/podman/issues/25473). -When performing an in-place update, please make sure to configure the Podman version to be locked at version `5.2.2-9.*`, by following the instructions below. +When performing an in-place update, make sure to configure the Podman version to be locked at version `5.2.2-9.*`, by following the instructions below. ```sh ## Install versionlock diff --git a/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md b/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md index 915420bbd7..93cb5402e7 100644 --- a/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md +++ b/deploy-manage/deploy/cloud-on-k8s/configuration-fleet.md @@ -241,7 +241,7 @@ spec: ... ``` -Please note that the environment variables related to policy selection mentioned in the {{agent}} [docs](/reference/fleet/agent-environment-variables.md) like `FLEET_SERVER_POLICY_ID` will be managed by the ECK operator. +Note that the environment variables related to policy selection mentioned in the {{agent}} [docs](/reference/fleet/agent-environment-variables.md) like `FLEET_SERVER_POLICY_ID` will be managed by the ECK operator. ## Running as a non-root user [k8s-elastic-agent-running-as-a-non-root-user] diff --git a/deploy-manage/deploy/cloud-on-k8s/pod-prestop-hook.md b/deploy-manage/deploy/cloud-on-k8s/pod-prestop-hook.md index dca565211e..856f918402 100644 --- a/deploy-manage/deploy/cloud-on-k8s/pod-prestop-hook.md +++ b/deploy-manage/deploy/cloud-on-k8s/pod-prestop-hook.md @@ -10,7 +10,7 @@ mapped_pages: When an {{es}} `Pod` is terminated, its `Endpoint` is removed from the `Service` and the {{es}} process is terminated. As these two operations happen in parallel, a race condition exists. If the {{es}} process is already shut down, but the `Endpoint` is still a part of the `Service`, any new connection might fail. For more information, check [Termination of pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods). -Moreover, kube-proxy resynchronizes its rules [every 30 seconds by default](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/#options). During that time window of 30 seconds, the terminating Pod IP may still be used when targeting the service. Please note the resync operation itself may take some time, especially if kube-proxy is configured to use iptables with a lot of services and rules to apply. +Moreover, kube-proxy resynchronizes its rules [every 30 seconds by default](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/#options). During that time window of 30 seconds, the terminating Pod IP may still be used when targeting the service. Note the resync operation itself may take some time, especially if kube-proxy is configured to use iptables with a lot of services and rules to apply. To address this issue and minimize unavailability, ECK relies on a [PreStop lifecycle hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/). It waits for an additional `PRE_STOP_ADDITIONAL_WAIT_SECONDS` (defaulting to 50). 
The additional wait time is used to: @@ -39,5 +39,5 @@ The pre-stop lifecycle hook also tries to gracefully shut down the {{es}} node i This is done on a best effort basis. In particular requests to an {{es}} cluster already in the process of shutting down might fail if the Kubernetes service has already been removed. The script allows for `PRE_STOP_MAX_DNS_ERRORS` which default to 2 before giving up. -When using local persistent volumes a different behaviour might be desirable because the {{es}} node’s associated storage will not be available anymore on the new Kubernetes node. `PRE_STOP_SHUTDOWN_TYPE` allows to override the default shutdown type to one of the [possible values](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node). Please be aware that setting it to anything other than `restart` might mean that the pre-stop hook will run longer than `terminationGracePeriodSeconds` of the Pod while moving data out of the terminating Pod and will not be able to complete unless you also adjust that value in the `podTemplate`. +When using local persistent volumes a different behaviour might be desirable because the {{es}} node’s associated storage will not be available anymore on the new Kubernetes node. `PRE_STOP_SHUTDOWN_TYPE` allows to override the default shutdown type to one of the [possible values](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node). Be aware that setting it to anything other than `restart` might mean that the pre-stop hook will run longer than `terminationGracePeriodSeconds` of the Pod while moving data out of the terminating Pod and will not be able to complete unless you also adjust that value in the `podTemplate`. diff --git a/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md b/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md index 11f23167c2..6b6dce01fc 100644 --- a/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md +++ b/deploy-manage/deploy/cloud-on-k8s/troubleshooting-beats.md @@ -26,7 +26,7 @@ When `kubectl` is used to modify a resource, it calculates the diff between the If you have configured a Beat to run as a `Deployment` and you are using a `hostPath` volume as the Beats data directory, you might encounter an error similar to the following: ```shell -ERROR instance/beat.go:958 Exiting: data path already locked by another beat. Please make sure that multiple beats are not sharing the same data path (path.data). +ERROR instance/beat.go:958 Exiting: data path already locked by another beat. Please make sure that multiple beats are not sharing the same data path (path.data). ``` This can happen if the new Pod is scheduled on the same Kubernetes node as the old Pod and is now trying to use the same data directory. Use a [`Recreate`](/deploy-manage/deploy/cloud-on-k8s/configuration-beats.md#k8s-beat-chose-the-deployment-model) deployment strategy to avoid this problem. diff --git a/deploy-manage/deploy/elastic-cloud/azure-marketplace-pricing.md b/deploy-manage/deploy/elastic-cloud/azure-marketplace-pricing.md index 398e00124f..2ef40bf967 100644 --- a/deploy-manage/deploy/elastic-cloud/azure-marketplace-pricing.md +++ b/deploy-manage/deploy/elastic-cloud/azure-marketplace-pricing.md @@ -51,4 +51,4 @@ For customers paying in non-USD currencies, any future prepaid fees, or on-demand ## Our commitment to you [ec_our_commitment_to_you] -We understand that pricing adjustments can raise questions or concerns, and we are here to support you throughout this process.
Should you have any inquiries or need assistance, please reach out to your Elastic account representative or Elastic Support at `support@elastic.co`. +We understand that pricing adjustments can raise questions or concerns, and we are here to support you throughout this process. Should you have any inquiries or need assistance, reach out to your Elastic account representative or Elastic Support at `support@elastic.co`. diff --git a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md index 7bde604703..29b7fbfec9 100644 --- a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md +++ b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md @@ -261,7 +261,7 @@ $$$azure-integration-migrate$$$How do I migrate my data from the classic Azure m $$$azure-integration-no-inbox$$$Can I invite users to my organization, even if they cannot receive emails? -: You can add Azure users as members of your organization even if they don’t have an inbox. Please reach out to Elastic support. +: You can add Azure users as members of your organization even if they don’t have an inbox. Reach out to Elastic support. ## Billing [ec-azure-integration-billing-faq] @@ -467,11 +467,11 @@ $$$azure-integration-deployment-failed-traffic-filter$$$My {{ecloud}} deployment ```txt { "code": "DeploymentFailed", - "message": "At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.", + "message": "At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.", "details": [ { "code": "500", - "message": "An error occurred during deployment creation. Please try again. If the problem persists, please contact support@elastic.co." + "message": "An error occurred during deployment creation. Please try again. If the problem persists, please contact support@elastic.co." } ] ``` @@ -510,7 +510,7 @@ Mimicking this metadata by manually adding tags to an {{ecloud}} deployment will $$$azure-integration-logs-not-ingested$$$My {{ecloud}} Azure Native ISV Service logs are not being ingested. : * When you set up monitoring for your Azure services, if your Azure and Elastic resources are in different subscriptions, you need to make sure that the `Microsoft.Elastic` resource provider is registered in the subscription in which the Azure resources exist. Check [How do I monitor my existing Azure services?](#azure-integration-monitor) for details. -* If you are using [IP or Private Link traffic filters](../../security/traffic-filtering.md), please reach out to [the Elastic Support Team](#azure-integration-support). +* If you are using [IP or Private Link traffic filters](../../security/traffic-filtering.md), reach out to [the Elastic Support Team](#azure-integration-support). diff --git a/deploy-manage/deploy/elastic-cloud/cloud-hosted.md b/deploy-manage/deploy/elastic-cloud/cloud-hosted.md index b1270862a2..f0d70b6f33 100644 --- a/deploy-manage/deploy/elastic-cloud/cloud-hosted.md +++ b/deploy-manage/deploy/elastic-cloud/cloud-hosted.md @@ -172,7 +172,7 @@ $$$faq-where$$$**Where are deployments hosted?** $$$faq-vs-aws$$$**What is the difference between {{ech}} and the Amazon {{es}} Service?** : {{ech}} is the only hosted and managed {{es}} service built, managed, and supported by the company behind {{es}}, {{kib}}, {{beats}}, and {{ls}}.
With {{ech}}, you always get the latest versions of the software. Our service is built on best practices and years of experience hosting and managing thousands of {{es}} clusters in the Cloud and on premise. For more information, check the following Amazon and Elastic {{es}} Service [comparison page](https://www.elastic.co/aws-elasticsearch-service). - Please note that there is no formal partnership between Elastic and Amazon Web Services (AWS), and Elastic does not provide any support on the AWS {{es}} Service. + Note that there is no formal partnership between Elastic and Amazon Web Services (AWS), and Elastic does not provide any support on the AWS {{es}} Service. $$$faq-aws$$$**Can I use {{ech}} on platforms other than AWS?** diff --git a/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-aws-marketplace.md b/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-aws-marketplace.md index bc500ff88f..0a88b65253 100644 --- a/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-aws-marketplace.md +++ b/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-aws-marketplace.md @@ -9,7 +9,7 @@ mapped_pages: # Create a monthly pay-as-you-go subscription on AWS Marketplace [ec-aws-marketplace-conversion] -When subscribing to an annual prepaid subscription to {{ecloud}} on AWS Marketplace, please follow these instructions to obtain a separate pay-as-you-go subscription. This subscription will allow us to continue your {{ecloud}} service through the Marketplace once the contract is expired. You will not get charged twice for the usage under the annual contract. +When subscribing to an annual prepaid subscription to {{ecloud}} on AWS Marketplace, follow these instructions to obtain a separate pay-as-you-go subscription. This subscription will allow us to continue your {{ecloud}} service through the Marketplace once the contract is expired. You will not get charged twice for the usage under the annual contract. 1. Log in to AWS under the same Account ID that you will use to accept the Annual Private Offer. 2. Go to the [AWS Marketplace subscription page for {{ecloud}} pay-as-you-go](https://aws.amazon.com/marketplace/saas/ordering?productId=bb253a6c-e775-4634-bdf0-17bd56a69c36&offerId=b2uzdkwqj7177fqhm39o4snxy). diff --git a/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-gcp-marketplace.md b/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-gcp-marketplace.md index 4c963cf9ef..1637c95e64 100644 --- a/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-gcp-marketplace.md +++ b/deploy-manage/deploy/elastic-cloud/create-monthly-pay-as-you-go-subscription-on-gcp-marketplace.md @@ -9,7 +9,7 @@ mapped_pages: # Create a monthly pay-as-you-go subscription on GCP Marketplace [ec-gcp-marketplace-conversion] -When subscribing to an annual prepaid subscription to {{ecloud}} on GCP Marketplace, please follow these instructions to obtain a separate pay-as-you-go subscription. This subscription will allow us to continue your {{ecloud}} service through the Marketplace once the contract is expired. You will not get charged twice for the usage under the annual contract. +When subscribing to an annual prepaid subscription to {{ecloud}} on GCP Marketplace, follow these instructions to obtain a separate pay-as-you-go subscription. 
This subscription will allow us to continue your {{ecloud}} service through the Marketplace once the contract is expired. You will not get charged twice for the usage under the annual contract. 1. Go to the [GCP Marketplace listing page for {{ecloud}} pay-as-you-go](https://console.cloud.google.com/marketplace/product/elastic-prod/elastic-cloud). 2. Click **Subscribe** to create a GCP Marketplace subscription under the selected GCP Billing Account. diff --git a/deploy-manage/deploy/elastic-cloud/google-cloud-platform-marketplace.md b/deploy-manage/deploy/elastic-cloud/google-cloud-platform-marketplace.md index 50ae29a4df..03e41929bb 100644 --- a/deploy-manage/deploy/elastic-cloud/google-cloud-platform-marketplace.md +++ b/deploy-manage/deploy/elastic-cloud/google-cloud-platform-marketplace.md @@ -38,7 +38,7 @@ To subscribe to {{ecloud}} through the GCP Marketplace: 7. After signing up, check your inbox to verify the email address you signed up with. Upon verification, you will be asked to create a password, and once created your organization will be set up and you will be logged into it. ::::{note} - Immediately after your first login to {{ecloud}} you may briefly see a banner on the {{ecloud}} user console saying that your account is disconnected. There is sometimes a short delay in activation, but refreshing the page is generally enough time to allow its completion. If this issue persists, please contact support. + Immediately after your first login to {{ecloud}} you may briefly see a banner on the {{ecloud}} user console saying that your account is disconnected. There is sometimes a short delay in activation, but refreshing the page is generally enough time to allow its completion. If this issue persists, contact support. :::: @@ -62,7 +62,7 @@ To prevent downtime, do not remove the currently used billing account before the {{ecloud}} subscriptions through GCP Marketplace are associated with a GCP billing account. In order to change the billing account associated with an {{ecloud}} organization: -* for customers under a Private Offer contract: please reach out to Elastic support and provide the GCP Billing Account, as well as the contact of any reseller information for approval. +* for customers under a Private Offer contract: reach out to Elastic support and provide the GCP Billing Account, as well as the contact of any reseller information for approval. * for pay-as-you-go customers: you need to have purchased and subscribed to {{ecloud}} on the new billing account using the details above—but do not create a new Elastic user or organization (that is, you can skip Steps 5 and 6 in the subscription instructions, above). Once you successfully subscribed with the new billing account, you can contact Elastic support and provide the new billing account ID you wish to move to, which you can find from [GCP’s billing page](https://console.cloud.google.com/billing). The ID is in the format `000000-000000-000000`. If you cancel your {{ecloud}} order on GCP through the [marketplace orders page](https://console.cloud.google.com/marketplace/orders) before the switch to the new billing account has been done, any running deployments will immediately enter a degraded state known as maintenance mode and they will be scheduled for termination in five days. 
diff --git a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md index 972efff6f8..5b754e06c4 100644 --- a/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md +++ b/deploy-manage/deploy/elastic-cloud/restrictions-known-problems.md @@ -68,7 +68,7 @@ $$$ec-restrictions-apis-kibana$$$ ## {{es}} and {{kib}} plugins [ec-restrictions-plugins] * {{kib}} plugins are not supported. -* {{es}} plugins, are not enabled by default for security purposes. Please reach out to support if you would like to enable {{es}} plugins support on your account. +* {{es}} plugins, are not enabled by default for security purposes. Reach out to support if you would like to enable {{es}} plugins support on your account. * Some {{es}} plugins do not apply to {{ecloud}}. For example, you won’t ever need to change discovery, as {{ecloud}} handles how nodes discover one another. % * In {{es}} 5.0 and later, site plugins are no longer supported. This change does not affect the site plugins {{ecloud}} might provide out of the box, such as Kopf or Head, since these site plugins are serviced by our proxies and not {{es}} itself. % * In {{es}} 5.0 and later, site plugins such as Kopf and Paramedic are no longer provided. We recommend that you use our [cluster performance metrics](../../monitor/stack-monitoring.md), [X-Pack monitoring features](../../monitor/stack-monitoring.md) and Kibana’s (6.3+) [Index Management UI](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-mgmt.html) if you want more detailed information or perform index management actions. @@ -145,7 +145,7 @@ To make a seamless migration, after restoring from a snapshot there are some add % ## Known problems [ec-known-problems] -% * There is a known problem affecting clusters with versions 7.7.0 and 7.7.1 due to [a bug in Elasticsearch](https://github.com/elastic/elasticsearch/issues/56739). Although rare, this bug can prevent you from running plans. If this occurs we recommend that you retry the plan, and if that fails please contact support to get your plan through. Because of this bug we recommend you to upgrade to version 7.8 and higher, where the problem has already been addressed. +% * There is a known problem affecting clusters with versions 7.7.0 and 7.7.1 due to [a bug in Elasticsearch](https://github.com/elastic/elasticsearch/issues/56739). Although rare, this bug can prevent you from running plans. If this occurs we recommend that you retry the plan, and if that fails contact support to get your plan through. Because of this bug we recommend you to upgrade to version 7.8 and higher, where the problem has already been addressed. % * A known issue can prevent direct rolling upgrades from {{es}} version 5.6.10 to version 6.3.0. As a workaround, we have removed version 6.3.0 from the [{{ecloud}} Console](https://cloud.elastic.co?page=docs&placement=docs-body) for new cluster deployments and for upgrading existing ones. If you are affected by this issue, check [Rolling upgrades from 5.6.x to 6.3.0 fails with "java.lang.IllegalStateException: commit doesn’t contain history uuid"](https://elastic.my.salesforce.com/articles/Support_Article/Rolling-upgrades-to-6-3-0-from-5-x-fails-with-java-lang-IllegalStateException-commit-doesn-t-contain-history-uuid?popup=false&id=kA0610000005JFG) in our Elastic Support Portal. If these steps do not work or you do not have access to the Support Portal, you can contact `support@elastic.co`. 
diff --git a/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md b/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md index 753138ec3c..c4e1757959 100644 --- a/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md +++ b/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md @@ -248,4 +248,4 @@ https://api.elastic-cloud.com/api/v1/deployments/extensions \ }' ``` -Please refer to the [Extensions API reference](https://www.elastic.co/docs/api/doc/cloud/group/endpoint-extensions) for the complete set of HTTP methods and payloads. +See [Extensions API reference](https://www.elastic.co/docs/api/doc/cloud/group/endpoint-extensions) for the complete set of HTTP methods and payloads. diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md index d2c4214e2a..208b0dffdd 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-docker-basic.md @@ -10,7 +10,7 @@ navigation_title: Single-node cluster Use Docker commands to start a single-node {{es}} cluster for development or testing. You can then run additional Docker commands to add nodes to the test cluster or run {{kib}}. ::::{tip} -* If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Please note that this setup is not suitable for production environments. +* If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Note that this setup is not suitable for production environments. * This setup doesn’t run multiple {{es}} nodes or {{kib}} by default. To create a multi-node cluster with {{kib}}, use Docker Compose instead. See [Start a multi-node cluster with Docker Compose](/deploy-manage/deploy/self-managed/install-elasticsearch-docker-compose.md). :::: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md index 343bc91272..33ebd981ca 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md @@ -15,7 +15,7 @@ Docker images for {{es}} are available from the Elastic Docker registry. A list ::: ::::{tip} -If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Please note that this setup is not suitable for production environments. +If you just want to test {{es}} in local development, refer to [Run {{es}} locally](/deploy-manage/deploy/self-managed/local-development-installation-quickstart.md). Note that this setup is not suitable for production environments. 
:::: Review the following guides to install {{es}} with Docker: diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 598e25a116..91bfd505cb 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -18,7 +18,7 @@ applies_to: The RPM package for {{es}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{es}} on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. ::::{note} -RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. Please see [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. +RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. See [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. :::: :::{include} _snippets/trial.md diff --git a/deploy-manage/monitor/autoops/ec-autoops-faq.md b/deploy-manage/monitor/autoops/ec-autoops-faq.md index 8a17197aa2..29df3ac347 100644 --- a/deploy-manage/monitor/autoops/ec-autoops-faq.md +++ b/deploy-manage/monitor/autoops/ec-autoops-faq.md @@ -23,7 +23,7 @@ $$$faq-autoops-supported-versions$$$What versions of {{es}} are supported for {{ : AutoOps supports {es} versions according to the [supported {{stack}} versions](https://www.elastic.co/support/eol). $$$faq-autoops-license$$$How is AutoOps currently licensed? -: AutoOps current feature set is available to {{ech}} customers at all subscription tiers. For more information please refer to the [subscription page](https://www.elastic.co/subscriptions/cloud). +: AutoOps current feature set is available to {{ech}} customers at all subscription tiers. For more information refer to the [subscription page](https://www.elastic.co/subscriptions/cloud). $$$faq-autoops-installation$$$How does AutoOps get installed and why may I not see AutoOps available on specific deployments? : AutoOps is automatically applied to {{es}} clusters on {{ecloud}}, rolling out in phases across CSPs and regions. Read more about AutoOps [roll out](ec-autoops-regions.md) status. diff --git a/deploy-manage/monitor/kibana-task-manager-health-monitoring.md b/deploy-manage/monitor/kibana-task-manager-health-monitoring.md index a4f9810e9b..dce450e4a6 100644 --- a/deploy-manage/monitor/kibana-task-manager-health-monitoring.md +++ b/deploy-manage/monitor/kibana-task-manager-health-monitoring.md @@ -112,7 +112,7 @@ The Runtime `status` indicates whether task executions have exceeded any of the ::::{important} Some tasks (such as [connectors](../manage-connectors.md)) will incorrectly report their status as successful even if the task failed. The runtime and workload block will return data about success and failures and will not take this into consideration. -To get a better sense of action failures, please refer to the [Event log index](../../explore-analyze/alerts-cases/alerts/event-log-index.md) for more accurate context into failures and successes. +To get a better sense of action failures, refer to the [Event log index](../../explore-analyze/alerts-cases/alerts/event-log-index.md) for more accurate context into failures and successes. 
:::: diff --git a/deploy-manage/monitor/orchestrators/ece-monitoring-ece-access.md b/deploy-manage/monitor/orchestrators/ece-monitoring-ece-access.md index d540568bb8..113434ef20 100644 --- a/deploy-manage/monitor/orchestrators/ece-monitoring-ece-access.md +++ b/deploy-manage/monitor/orchestrators/ece-monitoring-ece-access.md @@ -49,4 +49,4 @@ To access logs and metrics for your deployment: After you select one of the links, {{kib}} opens and shows you a view of the monitoring metrics for the logs or metrics that you selected. -If you are looking for an {{es}} or {{kib}} diagnostic to share with Elastic support, go to the **Operations** page for the deployment and download the diagnostic bundle to attach to your ticket. If logs or an ECE diagnostic are requested by Elastic support, please [run the ECE diagnostics tool](../../../troubleshoot/deployments/cloud-enterprise/run-ece-diagnostics-tool.md). +If you are looking for an {{es}} or {{kib}} diagnostic to share with Elastic support, go to the **Operations** page for the deployment and download the diagnostic bundle to attach to your ticket. If logs or an ECE diagnostic are requested by Elastic support, [run the ECE diagnostics tool](../../../troubleshoot/deployments/cloud-enterprise/run-ece-diagnostics-tool.md). diff --git a/deploy-manage/production-guidance/kibana-in-production-environments.md b/deploy-manage/production-guidance/kibana-in-production-environments.md index 39f840feca..68c8e15df2 100644 --- a/deploy-manage/production-guidance/kibana-in-production-environments.md +++ b/deploy-manage/production-guidance/kibana-in-production-environments.md @@ -21,7 +21,7 @@ Now, Kibana’s resource requirements extend beyond user activity. The system mu Additionally, the task manager enables distributed coordination across multiple {{kib}} instances, allowing {{kib}} to function as a logical cluster in certain aspects. ::::{important} -* {{kib}} does not support rolling [upgrades](/deploy-manage/upgrade/deployment-or-cluster/kibana.md), and deploying mixed versions of {{kib}} can result in data loss or upgrade failures. Please shut down all instances of {{kib}} before performing an upgrade, and ensure all running {{kib}} instances have matching versions. +* {{kib}} does not support rolling [upgrades](/deploy-manage/upgrade/deployment-or-cluster/kibana.md), and deploying mixed versions of {{kib}} can result in data loss or upgrade failures. Shut down all instances of {{kib}} before performing an upgrade, and ensure all running {{kib}} instances have matching versions. * While {{kib}} isn’t resource intensive, we still recommend running {{kib}} separate from your {{es}} data or master nodes. :::: diff --git a/deploy-manage/security/elastic-cloud-static-ips.md b/deploy-manage/security/elastic-cloud-static-ips.md index 9b7a580ed0..093d747886 100644 --- a/deploy-manage/security/elastic-cloud-static-ips.md +++ b/deploy-manage/security/elastic-cloud-static-ips.md @@ -120,7 +120,7 @@ Not suitable usage of egress static IPs to introduce network controls: ::::{warning} :name: ec-warning -Static IP ranges are subject to change. You will need to update your firewall rules when they change to prevent service disruptions. We will announce changes at least 8 weeks in advance (see [example](https://status.elastic.co/incidents/1xs411x77wgh)). Please subscribe to the [{{ecloud}} status page](https://status.elastic.co/) to remain up to date with any changes to the Static IP ranges which you will need to update at your side. 
+Static IP ranges are subject to change. You will need to update your firewall rules when they change to prevent service disruptions. We will announce changes at least 8 weeks in advance (see [example](https://status.elastic.co/incidents/1xs411x77wgh)). Subscribe to the [{{ecloud}} status page](https://status.elastic.co/) to remain up to date with any changes to the Static IP ranges which you will need to update at your side. :::: diff --git a/deploy-manage/security/fips-140-2.md b/deploy-manage/security/fips-140-2.md index a77d0913a7..1452424b29 100644 --- a/deploy-manage/security/fips-140-2.md +++ b/deploy-manage/security/fips-140-2.md @@ -76,7 +76,7 @@ The following is a high-level overview of the required configuration: Detailed instructions for installation and configuration of a FIPS certified Java security provider is beyond the scope of this document. Specifically, a FIPS certified [JCA](https://docs.oracle.com/en/java/javase/17/security/java-cryptography-architecture-jca-reference-guide.html) and [JSSE](https://docs.oracle.com/en/java/javase/17/security/java-secure-socket-extension-jsse-reference-guide.html) implementation is required so that the JVM uses FIPS validated implementations of NIST recommended cryptographic algorithms. -{{es}} has been tested with Bouncy Castle’s [bc-fips 1.0.2.5](https://repo1.maven.org/maven2/org/bouncycastle/bc-fips/1.0.2.5/bc-fips-1.0.2.5.jar) and [bctls-fips 1.0.19](https://repo1.maven.org/maven2/org/bouncycastle/bctls-fips/1.0.19/bctls-fips-1.0.19.jar). Please refer to the {{es}} [JVM support matrix](https://www.elastic.co/support/matrix#matrix_jvm) for details on which combinations of JVM and security provider are supported in FIPS mode. {{es}} does not ship with a FIPS certified provider. It is the responsibility of the user to install and configure the security provider to ensure compliance with FIPS 140-2. Using a FIPS certified provider will ensure that only approved cryptographic algorithms are used. +{{es}} has been tested with Bouncy Castle’s [bc-fips 1.0.2.5](https://repo1.maven.org/maven2/org/bouncycastle/bc-fips/1.0.2.5/bc-fips-1.0.2.5.jar) and [bctls-fips 1.0.19](https://repo1.maven.org/maven2/org/bouncycastle/bctls-fips/1.0.19/bctls-fips-1.0.19.jar). See the {{es}} [JVM support matrix](https://www.elastic.co/support/matrix#matrix_jvm) for details on which combinations of JVM and security provider are supported in FIPS mode. {{es}} does not ship with a FIPS certified provider. It is the responsibility of the user to install and configure the security provider to ensure compliance with FIPS 140-2. Using a FIPS certified provider will ensure that only approved cryptographic algorithms are used. To configure {{es}} to use additional security provider(s) configure {{es}}'s [JVM property](elasticsearch://reference/elasticsearch/jvm-settings.md#set-jvm-options) `java.security.properties` to point to a file ([example](https://raw.githubusercontent.com/elastic/elasticsearch/main/build-tools-internal/src/main/resources/fips_java.security)) in {{es}}'s `config` directory. Ensure the FIPS certified security provider is configured with the lowest order. This file should contain the necessary configuration to instruct Java to use the FIPS certified security provider. 
diff --git a/deploy-manage/security/logging-configuration/logfile-audit-events-ignore-policies.md b/deploy-manage/security/logging-configuration/logfile-audit-events-ignore-policies.md index 3633c1de53..69e75a29a4 100644 --- a/deploy-manage/security/logging-configuration/logfile-audit-events-ignore-policies.md +++ b/deploy-manage/security/logging-configuration/logfile-audit-events-ignore-policies.md @@ -20,7 +20,7 @@ The drawback of an audited system is represented by the inevitable performance p **Audit events ignore policies** are a finer way to tune the verbosity of the audit trail. These policies define rules that match audit events which will be *ignored* (read as: not printed). Rules match on the values of attributes of audit events and complement the `include` or `exclude` method. Imagine the corpus of audit events and the policies chopping off unwanted events. With a sole exception, all audit events are subject to the ignore policies. The exception are events of type `security_config_change`, which cannot be filtered out, unless excluded altogether. ::::{important} -When utilizing audit events ignore policies you are acknowledging potential accountability gaps that could render illegitimate actions undetectable. Please take time to review these policies whenever your system architecture changes. +When utilizing audit events ignore policies you are acknowledging potential accountability gaps that could render illegitimate actions undetectable. Take time to review these policies whenever your system architecture changes. :::: A policy is a named set of filter rules. Each filter rule applies to a single event attribute, one of the `users`, `realms`, `actions`, `roles` or `indices` attributes. The filter rule defines a list of [Lucene regexp](elasticsearch://reference/query-languages/query-dsl/regexp-syntax.md), **any** of which has to match the value of the audit event attribute for the rule to match. A policy matches an event if **all** the rules comprising it match the event. An audit event is ignored, therefore not printed, if it matches **any** policy. All other non-matching events are printed as usual. diff --git a/deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md b/deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md index 387829983b..f9243d4aec 100644 --- a/deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md +++ b/deploy-manage/security/secure-your-elastic-cloud-enterprise-installation/manage-security-certificates.md @@ -20,7 +20,7 @@ Cloud UI certificate Proxy certificate : Used to connect securely to {{es}} clusters and {{kib}}. You should use a wildcard certificate rooted at the [cluster endpoint that you set](../../deploy/cloud-enterprise/change-endpoint-urls.md) (`*.example.com`, for example). A wildcard certificate is required, because the first label of the DNS address is distinct for {{es}} clusters and {{kib}} (`bc898abb421843918ebc31a513169a.example.com`, for example). - If you wish to enable [custom endpoint aliases](../../deploy/cloud-enterprise/enable-custom-endpoint-aliases.md) in ECE 2.10 or later, please also follow the directions for adding Subject Alternative Name (SAN) entries to support these aliases. 
+ If you wish to enable [custom endpoint aliases](../../deploy/cloud-enterprise/enable-custom-endpoint-aliases.md) in ECE 2.10 or later, also follow the directions for adding Subject Alternative Name (SAN) entries to support these aliases. ::::{note} If you plan to deploy [Integration Servers](../../deploy/cloud-enterprise/manage-integrations-server.md), you must add two additional wildcard subdomains, `*.fleet.` and `*.apm.`, to the Subject Alternative Names (SANs) attached to the proxy wildcard certificate. Based on the previous example, your proxy certificates should end up with those three wildcards: `*.example.com, `*.fleet.example.com`, and `*.apm.example.com`. diff --git a/deploy-manage/security/traffic-filtering.md b/deploy-manage/security/traffic-filtering.md index 472d95f8cb..70993a5a3a 100644 --- a/deploy-manage/security/traffic-filtering.md +++ b/deploy-manage/security/traffic-filtering.md @@ -116,7 +116,7 @@ Requests rejected by traffic filter have status code `403 Forbidden` and one of ``` ```json -{"ok":false,"message":"Forbidden due to traffic filtering. Please see the Elastic documentation on Traffic Filtering for more information."} +{"ok":false,"message":"Forbidden due to traffic filtering. See the Elastic documentation on Traffic Filtering for more information."} ``` Additionally, traffic filter rejections are logged in ECE proxy logs as `status_reason: BLOCKED_BY_IP_FILTER`. Proxy logs also provide client IP in `client_ip` field. \ No newline at end of file diff --git a/deploy-manage/tools/snapshot-and-restore/elastic-cloud-hosted.md b/deploy-manage/tools/snapshot-and-restore/elastic-cloud-hosted.md index a8165e45b3..7688ee9d94 100644 --- a/deploy-manage/tools/snapshot-and-restore/elastic-cloud-hosted.md +++ b/deploy-manage/tools/snapshot-and-restore/elastic-cloud-hosted.md @@ -116,7 +116,7 @@ The API returns: Depending on the concrete repository implementation the numbers shown for bytes free as well as the number of blobs removed will either be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and subsequently cleaned up. -Please note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a repository. If you regularly delete snapshots, you will in most cases not get any or only minor space savings from using this functionality and should lower your frequency of invoking it accordingly. +Note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a repository. If you regularly delete snapshots, you will in most cases not get any or only minor space savings from using this functionality and should lower your frequency of invoking it accordingly. ## Back up a repository [snapshots-repository-backup] diff --git a/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-aws-gcp.md b/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-aws-gcp.md index cfef9f3a1c..e4359ba9aa 100644 --- a/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-aws-gcp.md +++ b/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-aws-gcp.md @@ -29,7 +29,7 @@ If you no longer need access to the snapshot of another deployment, you can remo 3. With **Remove Access**, the snapshot repository will be removed. ::::{note} -If the repository is still in use (for example by mounted searchable snapshots), it can’t be removed. 
Please first remove any indices stored in this repository. +If the repository is still in use (for example by mounted searchable snapshots), it can’t be removed. Remove any indices stored in this repository first. :::: diff --git a/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-azure.md b/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-azure.md index 43741afcb9..972f1ad432 100644 --- a/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-azure.md +++ b/deploy-manage/tools/snapshot-and-restore/repository-isolation-on-azure.md @@ -36,7 +36,7 @@ If you no longer need the old snapshots, you can remove the repository. By doing 3. With **Remove Access**, the snapshot repository will be removed. ::::{note} -If the repository is still in use (for example by mounted searchable snapshots), it can’t be removed. Please first remove any indices stored in this repository. +If the repository is still in use (for example by mounted searchable snapshots), it can’t be removed. Remove any indices stored in this repository first. :::: diff --git a/deploy-manage/tools/snapshot-and-restore/s3-repository.md b/deploy-manage/tools/snapshot-and-restore/s3-repository.md index a50256e10a..7d6caac4da 100644 --- a/deploy-manage/tools/snapshot-and-restore/s3-repository.md +++ b/deploy-manage/tools/snapshot-and-restore/s3-repository.md @@ -11,7 +11,7 @@ applies_to: You can use AWS S3 as a repository for [Snapshot/Restore](../snapshot-and-restore.md). ::::{note} -If you are looking for a hosted solution of {{es}} on AWS, please visit [https://www.elastic.co/cloud/](https://www.elastic.co/cloud/). +If you are looking for a hosted solution of {{es}} on AWS, visit [https://www.elastic.co/cloud/](https://www.elastic.co/cloud/). :::: See [this video](https://www.youtube.com/watch?v=ACqfyzWf-xs) for a walkthrough of connecting an AWS S3 repository. @@ -384,7 +384,7 @@ By default {{es}} communicates with your storage system using HTTPS, and validat [MinIO](https://minio.io) is an example of a storage system that provides an S3-compatible API. The `s3` repository type allows {{es}} to work with MinIO-backed repositories as well as repositories stored on AWS S3. Other S3-compatible storage systems may also work with {{es}}, but these are not covered by the {{es}} test suite. -There are many systems, including some from very well-known storage vendors, which claim to offer an S3-compatible API despite failing to emulate S3’s behaviour in full. If you are using such a system for your snapshots, consider using a [shared filesystem repository](shared-file-system-repository.md) based on a standardized protocol such as NFS to access your storage system instead. The `s3` repository type requires full compatibility with S3. In particular it must support the same set of API endpoints, with the same parameters, return the same errors in case of failures, and offer consistency and performance at least as good as S3 even when accessed concurrently by multiple nodes. You will need to work with the supplier of your storage system to address any incompatibilities you encounter. Please do not report {{es}} issues involving storage systems which claim to be S3-compatible unless you can demonstrate that the same issue exists when using a genuine AWS S3 repository. +There are many systems, including some from very well-known storage vendors, which claim to offer an S3-compatible API despite failing to emulate S3’s behaviour in full. 
If you are using such a system for your snapshots, consider using a [shared filesystem repository](shared-file-system-repository.md) based on a standardized protocol such as NFS to access your storage system instead. The `s3` repository type requires full compatibility with S3. In particular it must support the same set of API endpoints, with the same parameters, return the same errors in case of failures, and offer consistency and performance at least as good as S3 even when accessed concurrently by multiple nodes. You will need to work with the supplier of your storage system to address any incompatibilities you encounter. Don't report {{es}} issues involving storage systems which claim to be S3-compatible unless you can demonstrate that the same issue exists when using a genuine AWS S3 repository. You can perform some basic checks of the suitability of your storage system using the [repository analysis API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze). If this API does not complete successfully, or indicates poor performance, then your storage system is not fully compatible with AWS S3 and therefore unsuitable for use as a snapshot repository. However, these checks do not guarantee full compatibility. diff --git a/deploy-manage/tools/snapshot-and-restore/self-managed.md b/deploy-manage/tools/snapshot-and-restore/self-managed.md index 74d0c745e5..d3a627a591 100644 --- a/deploy-manage/tools/snapshot-and-restore/self-managed.md +++ b/deploy-manage/tools/snapshot-and-restore/self-managed.md @@ -120,7 +120,7 @@ The API returns: Depending on the concrete repository implementation the numbers shown for bytes free as well as the number of blobs removed will either be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and subsequently cleaned up. -Please note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a repository. If you regularly delete snapshots, you will in most cases not get any or only minor space savings from using this functionality and should lower your frequency of invoking it accordingly. +Note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a repository. If you regularly delete snapshots, you will in most cases not get any or only minor space savings from using this functionality and should lower your frequency of invoking it accordingly. ## Back up a repository [snapshots-repository-backup] diff --git a/deploy-manage/uninstall/delete-a-cloud-deployment.md b/deploy-manage/uninstall/delete-a-cloud-deployment.md index 3968d56aab..c3eab28aec 100644 --- a/deploy-manage/uninstall/delete-a-cloud-deployment.md +++ b/deploy-manage/uninstall/delete-a-cloud-deployment.md @@ -28,7 +28,7 @@ To delete an {{ech}} deployment: When you delete your deployment, billing stops immediately, rounding up to the nearest hour. :::{{warning}} -When deployments are deleted, we erase all data on disk, including snapshots. Snapshots are retained for very a limited amount of time post deletion and we cannot guarantee that deleted deployments can be restored from snapshots for this reason. If you accidentally delete a deployment, please contact support as soon as possible to increase the likelihood of restoring your deployment. +When deployments are deleted, we erase all data on disk, including snapshots. 
Snapshots are retained for a very limited amount of time post deletion and we cannot guarantee that deleted deployments can be restored from snapshots for this reason. If you accidentally delete a deployment, contact support as soon as possible to increase the likelihood of restoring your deployment. ::: :::{{tip}} diff --git a/deploy-manage/upgrade/deployment-or-cluster/saved-object-migrations.md b/deploy-manage/upgrade/deployment-or-cluster/saved-object-migrations.md index 11c89ecb5b..a5a2aa0de9 100644 --- a/deploy-manage/upgrade/deployment-or-cluster/saved-object-migrations.md +++ b/deploy-manage/upgrade/deployment-or-cluster/saved-object-migrations.md @@ -18,7 +18,7 @@ Each time you upgrade {{kib}}, an upgrade migration is performed to ensure that ::::{warning} -The `kibana.index` and `xpack.tasks.index` configuration settings are obsolete and no longer taken into account in 8.x. If you are using custom index names, please perform the necessary adaptations before attempting to upgrade to 8.x. +The `kibana.index` and `xpack.tasks.index` configuration settings are obsolete and no longer taken into account in 8.x. If you are using custom index names, perform the necessary adaptations before attempting to upgrade to 8.x. :::: diff --git a/deploy-manage/upgrade/orchestrator/upgrade-cloud-enterprise.md b/deploy-manage/upgrade/orchestrator/upgrade-cloud-enterprise.md index 31ae0ad53f..7a932076f1 100644 --- a/deploy-manage/upgrade/orchestrator/upgrade-cloud-enterprise.md +++ b/deploy-manage/upgrade/orchestrator/upgrade-cloud-enterprise.md @@ -126,7 +126,7 @@ To upgrade an {{ece}} installation, download the latest installation script. Log * If your ECE installation was set up using **Podman** instead of Docker, append the `--podman` flag when running the upgrade command. * If your installation uses **SELinux**, append the `--selinux` flag when running the upgrade command. * If you configured a **custom Docker registry** during installation using the `--docker-registry` or `--ece-docker-repository` parameters, include the same parameters when running the upgrade. -* Starting in ECE 3.8.0, `upgrade` requires `--user` and `--pass` arguments, or a path to the `bootstrap-secrets.json` file, if the file does not exist already at the expected default path. Please see [elastic-cloud-enterprise.sh upgrade](cloud://reference/cloud-enterprise/ece-installation-script-upgrade.md) for details. +* Starting in ECE 3.8.0, `upgrade` requires `--user` and `--pass` arguments, or a path to the `bootstrap-secrets.json` file, if the file does not exist already at the expected default path. See [elastic-cloud-enterprise.sh upgrade](cloud://reference/cloud-enterprise/ece-installation-script-upgrade.md) for details. :::: ```sh diff --git a/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md b/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md index 7e6814b4fa..73b4393007 100644 --- a/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md +++ b/deploy-manage/users-roles/cluster-or-deployment-auth/built-in-roles.md @@ -68,7 +68,7 @@ $$$built-in-roles-ingest-user$$$ `ingest_admin` $$$built-in-roles-kibana-dashboard$$$ `kibana_dashboard_only_user` -: (This role is deprecated, please use [{{kib}} feature privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md#kibana-feature-privileges) instead). Grants read-only access to the {{kib}} Dashboard in every [space in {{kib}}](/deploy-manage/manage-spaces.md).
This role does not have access to editing tools in {{kib}}. +: (This role is deprecated, use [{{kib}} feature privileges](../../../deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md#kibana-feature-privileges) instead). Grants read-only access to the {{kib}} Dashboard in every [space in {{kib}}](/deploy-manage/manage-spaces.md). This role does not have access to editing tools in {{kib}}. $$$built-in-roles-kibana-system$$$ `kibana_system` : Grants access necessary for the {{kib}} system user to read from and write to the {{kib}} indices, manage index templates and tokens, and check the availability of the {{es}} cluster. It also permits activating, searching, and retrieving user profiles, as well as updating user profile data for the `kibana-*` namespace. This role grants read access to the `.monitoring-*` indices and read and write access to the `.reporting-*` indices. For more information, see [Configuring Security in {{kib}}](/deploy-manage/security.md). @@ -82,7 +82,7 @@ $$$built-in-roles-kibana-admin$$$ `kibana_admin` : Grants access to all {{kib}} features in all spaces. For more information on {{kib}} authorization, see [](/deploy-manage/users-roles/cluster-or-deployment-auth/kibana-privileges.md). $$$built-in-roles-kibana-user$$$ `kibana_user` -: This role is deprecated, please use the [`kibana_admin`](#built-in-roles-kibana-admin) role instead. Grants access to all features in {{kib}}. +: This role is deprecated, use the [`kibana_admin`](#built-in-roles-kibana-admin) role instead. Grants access to all features in {{kib}}. $$$built-in-roles-logstash-admin$$$ `logstash_admin` : Grants access to the `.logstash*` indices for managing configurations, and grants necessary access for logstash-specific APIs exposed by the logstash x-pack plugin. diff --git a/explore-analyze/ai-assistant.md b/explore-analyze/ai-assistant.md index cbc24aff02..e8f112f429 100644 --- a/explore-analyze/ai-assistant.md +++ b/explore-analyze/ai-assistant.md @@ -36,7 +36,7 @@ The capabilities and ways to interact with AI Assistant can differ for each solu ## Prompt best practices [rag-for-esql] Elastic AI Assistant allows you to take full advantage of the Elastic platform to improve your operations. It can help you write an ES|QL query for a particular use case, or answer general questions about how to use the platform. Its ability to assist you depends on the specificity and detail of your questions. The more context and detail you provide, the more tailored and useful its responses will be. -To maximize its usefulness, consider using more detailed prompts or asking for additional information. For instance, after asking for an ES|QL query example, you could ask a follow-up question like, “Could you give me some other examples?” You can also ask for clarification or further exposition, for example "Please provide comments explaining the query you just gave." +To maximize its usefulness, consider using more detailed prompts or asking for additional information. For instance, after asking for an ES|QL query example, you could ask a follow-up question like, “Could you give me some other examples?” You can also ask for clarification or further exposition, for example "Provide comments explaining the query you just gave." In addition to practical advice, AI Assistant can offer conceptual advice, tips, and best practices for enhancing your security measures. 
You can ask it, for example: @@ -46,4 +46,4 @@ In addition to practical advice, AI Assistant can offer conceptual advice, tips, ## Your data and AI Assistant [ai-assistant-data-information] Elastic does not use customer data for model training. This includes anything you send the model, such as alert or event data, detection rule configurations, queries, and prompts. However, any data you provide to AI Assistant will be processed by the third-party provider you chose when setting up the generative AI connector as part of the assistant setup. -Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Please exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. +Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. diff --git a/explore-analyze/alerts-cases/watcher/actions-jira.md b/explore-analyze/alerts-cases/watcher/actions-jira.md index dd395e5d77..b8432588fd 100644 --- a/explore-analyze/alerts-cases/watcher/actions-jira.md +++ b/explore-analyze/alerts-cases/watcher/actions-jira.md @@ -87,7 +87,7 @@ bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_password ``` ::::{warning} -Storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is insecure and has been deprecated. Please use {{es}}'s secure [keystore](../../../deploy-manage/security/secure-settings.md) method instead. +Storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is insecure and has been deprecated. Use {{es}}'s secure [keystore](../../../deploy-manage/security/secure-settings.md) method instead. :::: diff --git a/explore-analyze/alerts-cases/watcher/actions-pagerduty.md b/explore-analyze/alerts-cases/watcher/actions-pagerduty.md index 7cae701c35..2577e4f57a 100644 --- a/explore-analyze/alerts-cases/watcher/actions-pagerduty.md +++ b/explore-analyze/alerts-cases/watcher/actions-pagerduty.md @@ -23,7 +23,7 @@ The following snippet shows a simple PagerDuty action definition: "transform" : { ... }, "throttle_period" : "5m", "pagerduty" : { - "description" : "Main system down, please check!" <1> + "description" : "Main system down, check!" <1> } } } @@ -41,7 +41,7 @@ To give the PagerDuty incident some more context, you can attach the payload as "throttle_period" : "5m", "pagerduty" : { "account" : "team1", - "description" : "Main system down, please check! 
Happened at {{ctx.execution_time}}", + "description" : "Main system down, check! Happened at {{ctx.execution_time}}", "attach_payload" : true, "client" : "/foo/bar/{{ctx.watch_id}}", "client_url" : "http://www.example.org/", diff --git a/explore-analyze/alerts-cases/watcher/actions-slack.md b/explore-analyze/alerts-cases/watcher/actions-slack.md index 0770c0dd78..3d27f078ec 100644 --- a/explore-analyze/alerts-cases/watcher/actions-slack.md +++ b/explore-analyze/alerts-cases/watcher/actions-slack.md @@ -149,7 +149,7 @@ bin/elasticsearch-keystore add xpack.notification.slack.account.monitoring.secur ``` ::::{warning} -You can no longer configure Slack accounts using `elasticsearch.yml` settings. Please use {{es}}'s secure [keystore](../../../deploy-manage/security/secure-settings.md) method instead. +You can no longer configure Slack accounts using `elasticsearch.yml` settings. Use {{es}}'s secure [keystore](../../../deploy-manage/security/secure-settings.md) method instead. :::: diff --git a/explore-analyze/alerts-cases/watcher/actions.md b/explore-analyze/alerts-cases/watcher/actions.md index c88400147f..cc797fe456 100644 --- a/explore-analyze/alerts-cases/watcher/actions.md +++ b/explore-analyze/alerts-cases/watcher/actions.md @@ -146,7 +146,7 @@ If you do not define a throttle period at the action or watch level, the global xpack.watcher.execution.default_throttle_period: 15m ``` -{{watcher}} also supports acknowledgement-based throttling. You can acknowledge a watch using the [ack watch API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch) to prevent the watch actions from being executed again while the watch condition remains `true`. This essentially tells {{watcher}} "I received the notification and I’m handling it, please do not notify me about this error again". An acknowledged watch action remains in the `acked` state until the watch’s condition evaluates to `false`. When that happens, the action’s state changes to `awaits_successful_execution`. +{{watcher}} also supports acknowledgement-based throttling. You can acknowledge a watch using the [ack watch API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch) to prevent the watch actions from being executed again while the watch condition remains `true`. This essentially tells {{watcher}} "I received the notification and I’m handling it, do not notify me about this error again". An acknowledged watch action remains in the `acked` state until the watch’s condition evaluates to `false`. When that happens, the action’s state changes to `awaits_successful_execution`. To acknowledge an action, you use the [ack watch API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch): diff --git a/explore-analyze/index.md b/explore-analyze/index.md index ba0f3708cf..5650d00892 100644 --- a/explore-analyze/index.md +++ b/explore-analyze/index.md @@ -31,7 +31,7 @@ Elastic takes the following measures to ensure accessibility of Kibana: Kibana aims to meet [WCAG 2.1 level AA](https://www.w3.org/WAI/WCAG21/quickref/?currentsidebar=%23col_customize&levels=aaa&technologies=server%2Csmil%2Cflash%2Csl) compliance. Currently, we can only claim to partially conform, meaning we do not fully meet all of the success criteria. However, we do try to take a broader view of accessibility, and go above and beyond the legal and regulatory standards to provide a good experience for all of our users. 
**Feedback** -We welcome your feedback on the accessibility of Kibana. Please let us know if you encounter accessibility barriers on Kibana by either emailing us at `accessibility@elastic.co` or opening [an issue on GitHub](https://github.com/elastic/kibana/issues/new?labels=Project%3AAccessibility&template=Accessibility.md&title=%28Accessibility%29). +We welcome your feedback on the accessibility of Kibana. Let us know if you encounter accessibility barriers on Kibana by either emailing us at `accessibility@elastic.co` or opening [an issue on GitHub](https://github.com/elastic/kibana/issues/new?labels=Project%3AAccessibility&template=Accessibility.md&title=%28Accessibility%29). **Technical specifications** Accessibility of Kibana relies on the following technologies to work with your web browser and any assistive technologies or plugins installed on your computer: @@ -42,7 +42,7 @@ Accessibility of Kibana relies on the following technologies to work with your w * WAI-ARIA **Limitations and alternatives** -Despite our best efforts to ensure accessibility of Kibana, there are some limitations. Please [open an issue on GitHub](https://github.com/elastic/kibana/issues/new?labels=Project%3AAccessibility&template=Accessibility.md&title=%28Accessibility%29) if you observe an issue not in this list. +Despite our best efforts to ensure accessibility of Kibana, there are some limitations. [Open an issue on GitHub](https://github.com/elastic/kibana/issues/new?labels=Project%3AAccessibility&template=Accessibility.md&title=%28Accessibility%29) if you observe an issue not in this list. Known limitations are in the following areas: @@ -57,7 +57,7 @@ To see individual tickets, view our [GitHub issues with label "`Project:Accessib Elastic assesses the accessibility of Kibana with the following approaches: * **Self-evaluation**: Our employees are familiar with accessibility standards and review new designs and implemented features to confirm that they are accessible. -* **External evaluation**: We engage external contractors to help us conduct an independent assessment and generate a formal VPAT. Please email `accessibility@elastic.co` if you’d like a copy. +* **External evaluation**: We engage external contractors to help us conduct an independent assessment and generate a formal VPAT. Email `accessibility@elastic.co` if you’d like a copy. * **Automated evaluation**: We are starting to run [axe](https://www.deque.com/axe/) on every page. See our current progress in the [automated testing GitHub issue](https://github.com/elastic/kibana/issues/51456). Manual testing largely focuses on screen reader support and is done on: diff --git a/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md b/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md index e44d16b0f8..33ad4329af 100644 --- a/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md +++ b/explore-analyze/machine-learning/anomaly-detection/anomaly-detection-scale.md @@ -92,7 +92,7 @@ You may want to use `chunking_config` to tune your search speed when your {{dfee In certain cases, you cannot do aggregations to increase performance. For example, categorization jobs use the full log message to detect anomalies, so this data cannot be aggregated. If you have many influencer fields, it may not be beneficial to use an aggregation either. This is because only a few documents in each bucket may have the combination of all the different influencer fields. 
-Please consult [Aggregating data for faster performance](ml-configuring-aggregation.md) to learn more. +See [Aggregating data for faster performance](ml-configuring-aggregation.md) to learn more. ## 9. Optimize the results retention [results-retention] @@ -118,7 +118,7 @@ For more information, refer to [Model snapshots](/explore-analyze/machine-learni ## 12. Optimize your search queries [search-queries] -If you are operating on a big scale, make sure that your {{dfeed}} query is as efficient as possible. There are different ways to write {{es}} queries and some of them are more efficient than others. Please consult [Tune for search speed](../../../deploy-manage/production-guidance/optimize-performance/search-speed.md) to learn more about {{es}} performance tuning. +If you are operating on a big scale, make sure that your {{dfeed}} query is as efficient as possible. There are different ways to write {{es}} queries and some of them are more efficient than others. See [Tune for search speed](../../../deploy-manage/production-guidance/optimize-performance/search-speed.md) to learn more about {{es}} performance tuning. You need to clone or recreate an existing job if you want to optimize its search query. diff --git a/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md b/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md index 82ef2c286e..86bb7ddd18 100644 --- a/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md +++ b/explore-analyze/machine-learning/anomaly-detection/ml-getting-started.md @@ -315,4 +315,4 @@ In general, it is a good idea to start with single metric {{anomaly-jobs}} for y If you want to find more sample jobs, see [Supplied configurations](ootb-ml-jobs.md). In particular, there are sample jobs for [Apache](/reference/data-analysis/machine-learning/ootb-ml-jobs-apache.md) and [Nginx](/reference/data-analysis/machine-learning/ootb-ml-jobs-nginx.md) that are quite similar to the examples in this tutorial. -If you encounter problems, we’re here to help. If you are an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](http://support.elastic.co). Or post in the [Elastic forum](https://discuss.elastic.co/). +If you encounter problems, we’re here to help. If you are an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](http://support.elastic.co). Or post in the [Elastic forum](https://discuss.elastic.co/). diff --git a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md index a1326740f3..5d026b2c13 100644 --- a/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md +++ b/explore-analyze/machine-learning/data-frame-analytics/ml-trained-models.md @@ -115,4 +115,4 @@ If you also want to copy the {{dfanalytics-job}} to the new cluster, you can exp ## Importing an external model to the {{stack}} [import-external-model-to-es] -It is possible to import a model to your {{es}} cluster even if the model is not trained by Elastic {{dfanalytics}}. Eland supports [importing models](eland://reference/machine-learning.md) directly through its APIs. Please refer to the latest [Eland documentation](https://eland.readthedocs.io/en/latest/index.html) for more information on supported model types and other details of using Eland to import models with. 
+It is possible to import a model to your {{es}} cluster even if the model is not trained by Elastic {{dfanalytics}}. Eland supports [importing models](eland://reference/machine-learning.md) directly through its APIs. See the latest [Eland documentation](https://eland.readthedocs.io/en/latest/index.html) for more information on supported model types and other details of using Eland to import models. diff --git a/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md b/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md index 60b4ba8f5f..ea18f093d7 100644 --- a/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md +++ b/explore-analyze/machine-learning/nlp/ml-nlp-limitations.md @@ -12,7 +12,7 @@ The following limitations and known problems apply to the 9.0.0-beta1 release of ## Document size limitations when using `semantic_text` fields [ml-nlp-large-documents-limit-10k-10mb] -When using semantic text to ingest documents, chunking takes place automatically. The number of chunks is limited by the [`index.mapping.nested_objects.limit`](elasticsearch://reference/elasticsearch/index-settings/mapping-limit.md) cluster setting, which defaults to 10k. Documents that are too large will cause errors during ingestion. To avoid this issue, please split your documents into roughly 1MB parts before ingestion. +When using semantic text to ingest documents, chunking takes place automatically. The number of chunks is limited by the [`index.mapping.nested_objects.limit`](elasticsearch://reference/elasticsearch/index-settings/mapping-limit.md) cluster setting, which defaults to 10k. Documents that are too large will cause errors during ingestion. To avoid this issue, split your documents into roughly 1MB parts before ingestion. ## ELSER semantic search is limited to 512 tokens per field that inference is applied to [ml-nlp-elser-v1-limit-512] diff --git a/explore-analyze/query-filter/languages/sql-client-apps-dbeaver.md b/explore-analyze/query-filter/languages/sql-client-apps-dbeaver.md index 3b05dfe36a..366bda3507 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-dbeaver.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-dbeaver.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} JDBC driver to access {{es}} data from DBeaver. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-excel.md b/explore-analyze/query-filter/languages/sql-client-apps-excel.md index 5086381dad..774c361db5 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-excel.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-excel.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} ODBC driver to access {{es}} data from Microsoft Excel. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. 
:::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-microstrat.md b/explore-analyze/query-filter/languages/sql-client-apps-microstrat.md index 186ec14f0a..584e898596 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-microstrat.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-microstrat.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} ODBC driver to access {{es}} data from MicroStrategy Desktop. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-powerbi.md b/explore-analyze/query-filter/languages/sql-client-apps-powerbi.md index 2362947818..8b0125a214 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-powerbi.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-powerbi.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} ODBC driver to access {{es}} data from Microsoft Power BI Desktop. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-ps1.md b/explore-analyze/query-filter/languages/sql-client-apps-ps1.md index fdc5ad7d69..945b084fc4 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-ps1.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-ps1.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} ODBC driver to access {{es}} data from Microsoft PowerShell. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-qlik.md b/explore-analyze/query-filter/languages/sql-client-apps-qlik.md index 042636fab0..e07a4c51ca 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-qlik.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-qlik.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} ODBC driver to access {{es}} data from Qlik Sense Desktop. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. 
:::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-squirrel.md b/explore-analyze/query-filter/languages/sql-client-apps-squirrel.md index 665cbc70ea..9cadafe838 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-squirrel.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-squirrel.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} JDBC driver to access {{es}} data from SQuirreL SQL. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-tableau-desktop.md b/explore-analyze/query-filter/languages/sql-client-apps-tableau-desktop.md index 1e2d54d8af..c24bb202e2 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-tableau-desktop.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-tableau-desktop.md @@ -11,7 +11,7 @@ mapped_pages: Use the {{es}} JDBC driver and dedicated {{es}} Tableau Connector to access {{es}} data from Tableau Desktop. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-tableau-server.md b/explore-analyze/query-filter/languages/sql-client-apps-tableau-server.md index ac9fe574ce..45977686d7 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-tableau-server.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-tableau-server.md @@ -11,7 +11,7 @@ mapped_pages: Use the {{es}} JDBC driver and dedicated {{es}} Tableau Connector to access {{es}} data from Tableau Server. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps-workbench.md b/explore-analyze/query-filter/languages/sql-client-apps-workbench.md index 806ccf12ae..9fca7b3a37 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps-workbench.md +++ b/explore-analyze/query-filter/languages/sql-client-apps-workbench.md @@ -11,7 +11,7 @@ mapped_pages: You can use the {{es}} JDBC driver to access {{es}} data from SQL Workbench/J. ::::{important} -Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. +Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, reach out to its vendor. 
:::: diff --git a/explore-analyze/query-filter/languages/sql-client-apps.md b/explore-analyze/query-filter/languages/sql-client-apps.md index 9dbb0a7b2e..0c922ba435 100644 --- a/explore-analyze/query-filter/languages/sql-client-apps.md +++ b/explore-analyze/query-filter/languages/sql-client-apps.md @@ -23,7 +23,7 @@ Thanks to its [JDBC](sql-jdbc.md) and [ODBC](sql-odbc.md) interfaces, a broad ra * [Tableau Server](sql-client-apps-tableau-server.md) ::::{important} -Elastic does not endorse, promote or provide support for any of the applications listed. For native Elasticsearch integration in these products, please reach out to their respective vendor. +Elastic does not endorse, promote or provide support for any of the applications listed. For native Elasticsearch integration in these products, reach out to their respective vendor. :::: diff --git a/explore-analyze/query-filter/languages/sql-concepts.md b/explore-analyze/query-filter/languages/sql-concepts.md index 3d4b6e0cb0..462ba15678 100644 --- a/explore-analyze/query-filter/languages/sql-concepts.md +++ b/explore-analyze/query-filter/languages/sql-concepts.md @@ -13,7 +13,7 @@ mapped_pages: For clarity, it is important to establish the meaning behind certain words as, the same wording might convey different meanings to different readers depending on one’s familiarity with SQL versus {{es}}. ::::{note} -This documentation while trying to be complete, does assume the reader has *basic* understanding of {{es}} and/or SQL. If that is not the case, please continue reading the documentation however take notes and pursue the topics that are unclear either through the main {{es}} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources here to enumerate). +This documentation while trying to be complete, does assume the reader has *basic* understanding of {{es}} and/or SQL. If that is not the case, continue reading the documentation however take notes and pursue the topics that are unclear either through the main {{es}} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources here to enumerate). :::: diff --git a/explore-analyze/transforms/transform-limitations.md b/explore-analyze/transforms/transform-limitations.md index 4ff84ca8e2..cb5ad43e8f 100644 --- a/explore-analyze/transforms/transform-limitations.md +++ b/explore-analyze/transforms/transform-limitations.md @@ -49,7 +49,7 @@ A {{ctransform}} periodically checks for changes to source data. The functionali ### Aggregation responses may be incompatible with destination index mappings [transform-aggresponse-limitations] -When a pivot {{transform}} is first started, it will deduce the mappings required for the destination index. This process is based on the field types of the source index and the aggregations used. If the fields are derived from [`scripted_metrics`](elasticsearch://reference/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) or [`bucket_scripts`](elasticsearch://reference/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md), [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md) will be used. In some instances the deduced mappings may be incompatible with the actual data. For example, numeric overflows might occur or dynamically mapped fields might contain both numbers and strings. Please check {{es}} logs if you think this may have occurred. 
+When a pivot {{transform}} is first started, it will deduce the mappings required for the destination index. This process is based on the field types of the source index and the aggregations used. If the fields are derived from [`scripted_metrics`](elasticsearch://reference/aggregations/search-aggregations-metrics-scripted-metric-aggregation.md) or [`bucket_scripts`](elasticsearch://reference/aggregations/search-aggregations-pipeline-bucket-script-aggregation.md), [dynamic mappings](../../manage-data/data-store/mapping/dynamic-mapping.md) will be used. In some instances the deduced mappings may be incompatible with the actual data. For example, numeric overflows might occur or dynamically mapped fields might contain both numbers and strings. Check {{es}} logs if you think this may have occurred. You can view the deduced mappings by using the [preview transform API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform). See the `generated_dest_index` object in the API response. @@ -119,7 +119,7 @@ If your data uses the [date nanosecond data type](elasticsearch://reference/elas [ILM](../../manage-data/lifecycle/index-lifecycle-management.md) is not recommended to use as a {{transform}} destination index. {{transforms-cap}} update documents in the current destination, and cannot delete documents in the indices previously used by ILM. This may lead to duplicated documents when you use {{transforms}} combined with ILM in case of a rollover. -If you use ILM to have time-based indices, please consider using the [Date index name](elasticsearch://reference/enrich-processor/date-index-name-processor.md) instead. The processor works without duplicated documents if your {{transform}} contains a `group_by` based on `date_histogram`. +If you use ILM to have time-based indices, consider using the [Date index name](elasticsearch://reference/enrich-processor/date-index-name-processor.md) instead. The processor works without duplicated documents if your {{transform}} contains a `group_by` based on `date_histogram`. ## Limitations in {{kib}} [transform-ui-limitations] diff --git a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md index 99d5d5d7b8..18f374e827 100644 --- a/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md +++ b/manage-data/ingest/ingesting-data-from-applications/ingest-data-from-beats-to-elasticsearch-service-with-logstash-as-proxy.md @@ -195,7 +195,7 @@ Your results should be similar to the following: Index setup finished. Loading dashboards (Kibana must be running and reachable) Loaded dashboards -Setting up ML using setup --machine-learning is going to be removed in 8.0.0. Please use the ML app instead. +Setting up ML using setup --machine-learning is going to be removed in 8.0.0. Use the ML app instead. 
See more: /explore-analyze/machine-learning.md Loaded machine learning job configurations Loaded Ingest pipelines diff --git a/manage-data/ingest/upload-data-files.md b/manage-data/ingest/upload-data-files.md index 018d7412d2..bd327c0a06 100644 --- a/manage-data/ingest/upload-data-files.md +++ b/manage-data/ingest/upload-data-files.md @@ -16,7 +16,7 @@ applies_to: % - [x] ./raw-migrated-files/docs-content/serverless/elasticsearch-ingest-data-file-upload.md % - [x] ./raw-migrated-files/kibana/kibana/connect-to-elasticsearch.md -% Note from David: I've removed the ID $$$upload-data-kibana$$$ from manage-data/ingest.md as those links should instead point to this page. So, please ensure that the following ID is included on this page. I've added it beside the title. +% Note from David: I've removed the ID $$$upload-data-kibana$$$ from manage-data/ingest.md as those links should instead point to this page. So, ensure that the following ID is included on this page. I've added it beside the title. You can upload files, view their fields and metrics, and optionally import them to {{es}} with the Data Visualizer. diff --git a/manage-data/lifecycle/rollup.md b/manage-data/lifecycle/rollup.md index 45a2cde414..9f2c3a44f9 100644 --- a/manage-data/lifecycle/rollup.md +++ b/manage-data/lifecycle/rollup.md @@ -9,7 +9,7 @@ mapped_pages: ::::{admonition} Deprecated in 8.11.0. :class: warning -Rollups will be removed in a future version. Please [migrate](/manage-data/lifecycle/rollup/migrating-from-rollup-to-downsampling.md) to [downsampling](/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md) instead. +Rollups will be removed in a future version. [Migrate](/manage-data/lifecycle/rollup/migrating-from-rollup-to-downsampling.md) to [downsampling](/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md) instead. :::: Keeping historical data around for analysis is extremely useful but often avoided due to the financial cost of archiving massive amounts of data. For example, your system may be generating 500 documents every second. That will generate 43 million documents per day, and nearly 16 billion documents a year. Retention periods are thus driven by financial realities rather than by the usefulness of extensive historical data. diff --git a/manage-data/lifecycle/rollup/getting-started-api.md b/manage-data/lifecycle/rollup/getting-started-api.md index 10d2736f5a..d874afba84 100644 --- a/manage-data/lifecycle/rollup/getting-started-api.md +++ b/manage-data/lifecycle/rollup/getting-started-api.md @@ -9,7 +9,7 @@ mapped_pages: ::::{admonition} Deprecated in 8.11.0. :class: warning -Rollups will be removed in a future version. Please [migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md) instead. +Rollups will be removed in a future version. [Migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](/manage-data/data-store/data-streams/downsampling-time-series-data-stream.md) instead. :::: ::::{warning} diff --git a/manage-data/lifecycle/rollup/rollup-aggregation-limitations.md b/manage-data/lifecycle/rollup/rollup-aggregation-limitations.md index ebe0a18b15..392023e2a0 100644 --- a/manage-data/lifecycle/rollup/rollup-aggregation-limitations.md +++ b/manage-data/lifecycle/rollup/rollup-aggregation-limitations.md @@ -11,7 +11,7 @@ applies_to: ::::{admonition} Deprecated in 8.11.0. :class: warning -Rollups will be removed in a future version. 
Please [migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. +Rollups will be removed in a future version. [Migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. :::: diff --git a/manage-data/lifecycle/rollup/rollup-search-limitations.md b/manage-data/lifecycle/rollup/rollup-search-limitations.md index 25ff85c37e..74c2a68c42 100644 --- a/manage-data/lifecycle/rollup/rollup-search-limitations.md +++ b/manage-data/lifecycle/rollup/rollup-search-limitations.md @@ -11,7 +11,7 @@ applies_to: ::::{admonition} Deprecated in 8.11.0. :class: warning -Rollups will be removed in a future version. Please [migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. +Rollups will be removed in a future version. [Migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. :::: diff --git a/manage-data/lifecycle/rollup/understanding-groups.md b/manage-data/lifecycle/rollup/understanding-groups.md index af9aedca4c..1a3739adb6 100644 --- a/manage-data/lifecycle/rollup/understanding-groups.md +++ b/manage-data/lifecycle/rollup/understanding-groups.md @@ -11,7 +11,7 @@ applies_to: ::::{admonition} Deprecated in 8.11.0. :class: warning -Rollups will be removed in a future version. Please [migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. +Rollups will be removed in a future version. [Migrate](migrating-from-rollup-to-downsampling.md) to [downsampling](../../data-store/data-streams/downsampling-time-series-data-stream.md) instead. :::: diff --git a/reference/fleet/agent-policy.md b/reference/fleet/agent-policy.md index 2ec9151e57..90af4ee341 100644 --- a/reference/fleet/agent-policy.md +++ b/reference/fleet/agent-policy.md @@ -94,7 +94,7 @@ To add a new integration to one or more {{agent}} policies: 6. In Step 2 on the page, you have two options: 1. If you’d like to create a new policy for your {{agent}}s, on the **New hosts** tab specify a name for the new agent policy and choose whether or not to collect system logs and metrics. Collecting logs and metrics will add the System integration to the new agent policy. - 2. If you already have an {{agent}} policy created, on the **Existing hosts** tab use the drop-down menu to specify one or more agent policies that you’d like to add the integration to. Please note this this feature, known as "reusable integrations", requires an [Enterprise subscription](https://www.elastic.co/subscriptions). + 2. If you already have an {{agent}} policy created, on the **Existing hosts** tab use the drop-down menu to specify one or more agent policies that you’d like to add the integration to. Note that this feature, known as "reusable integrations", requires an [Enterprise subscription](https://www.elastic.co/subscriptions). 7. Click **Save and continue** to confirm your settings. 
diff --git a/reference/fleet/air-gapped.md b/reference/fleet/air-gapped.md index fe802b7a60..ae9c55bc31 100644 --- a/reference/fleet/air-gapped.md +++ b/reference/fleet/air-gapped.md @@ -107,8 +107,8 @@ There are different distributions available: * {{stack-version}} (recommended): *docker.elastic.co/package-registry/distribution:{{stack-version}}* - Selection of packages from the production repository released with {{stack}} {{stack-version}}. * lite-{{stack-version}}: *docker.elastic.co/package-registry/distribution:lite-{{stack-version}}* - Subset of the most commonly used packages from the production repository released with {{stack}} {{stack-version}}. This image is a good candidate to start using {{fleet}} in air-gapped environments. -* production: *docker.elastic.co/package-registry/distribution:production* - Packages available in the production registry ([https://epr.elastic.co](https://epr.elastic.co)). Please note that this image is updated every time a new version of a package gets published. -* lite: *docker.elastic.co/package-registry/distribution:lite* - Subset of the most commonly used packages available in the production registry ([https://epr.elastic.co](https://epr.elastic.co)). Please note that this image is updated every time a new version of a package gets published. +* production: *docker.elastic.co/package-registry/distribution:production* - Packages available in the production registry ([https://epr.elastic.co](https://epr.elastic.co)). Note that this image is updated every time a new version of a package gets published. +* lite: *docker.elastic.co/package-registry/distribution:lite* - Subset of the most commonly used packages available in the production registry ([https://epr.elastic.co](https://epr.elastic.co)). Note that this image is updated every time a new version of a package gets published. To update the distribution image, re-pull the image and then restart the docker container. diff --git a/reference/fleet/data-streams-scenario3.md b/reference/fleet/data-streams-scenario3.md index 19504971cb..0d1a858ff6 100644 --- a/reference/fleet/data-streams-scenario3.md +++ b/reference/fleet/data-streams-scenario3.md @@ -75,7 +75,7 @@ metrics-system.network-production@custom Now that you’ve created a component template, you need to create an index template to apply the changes to the correct data stream. The easiest way to do this is to duplicate and modify the integration’s existing index template. ::::{warning} -Please note the following: * When duplicating the index template, do not change or remove any managed properties. This may result in problems when upgrading. Cloning the index template of an integration package involves some risk as any changes made to the original index template when it is upgraded will not be propagated to the cloned version. * These steps assume that you want to have a namespace specific ILM policy, which requires index template cloning. Cloning the index template of an integration package involves some risk because any changes made to the original index template as part of package upgrades are not propagated to the cloned version. See [Cloning the index template of an integration package](/reference/fleet/integrations-assets-best-practices.md#assets-restrictions-cloning-index-template) for details. +Note the following: * When duplicating the index template, do not change or remove any managed properties. This may result in problems when upgrading. 
Cloning the index template of an integration package involves some risk as any changes made to the original index template when it is upgraded will not be propagated to the cloned version. * These steps assume that you want to have a namespace specific ILM policy, which requires index template cloning. Cloning the index template of an integration package involves some risk because any changes made to the original index template as part of package upgrades are not propagated to the cloned version. See [Cloning the index template of an integration package](/reference/fleet/integrations-assets-best-practices.md#assets-restrictions-cloning-index-template) for details. + If you want to change the ILM Policy, the number of shards, or other settings for the datastreams of one or more integrations, but **the changes do not need to be specific to a given namespace**, it’s strongly recommended to use a `@custom` component template, as described in [Scenario 1](/reference/fleet/data-streams-scenario1.md) and [Scenario 2](/reference/fleet/data-streams-scenario2.md), so as to avoid the problems mentioned above. See the [ILM](/reference/fleet/data-streams.md#data-streams-ilm) section for details. diff --git a/reference/fleet/elastic-agent-inputs-list.md b/reference/fleet/elastic-agent-inputs-list.md index b016521346..530afab799 100644 --- a/reference/fleet/elastic-agent-inputs-list.md +++ b/reference/fleet/elastic-agent-inputs-list.md @@ -97,7 +97,7 @@ When you [configure inputs](/reference/fleet/elastic-agent-input-configuration.m | `httpjson` | Read messages from an HTTP API with JSON payloads. | [HTTP JSON input](beats://reference/filebeat/filebeat-input-httpjson.md) ({{filebeat}} docs) | | `journald` | [beta] A system service that collects and stores logging data. | [Journald input](beats://reference/filebeat/filebeat-input-journald.md) ({{filebeat}} docs) | | `kafka` | Reads from topics in a Kafka cluster. | [Kafka input](beats://reference/filebeat/filebeat-input-kafka.md) ({{filebeat}} docs) | -| `log` | DEPRECATED: Please use the `filestream` input instead. | n/a | +| `log` | DEPRECATED: Use the `filestream` input instead. | n/a | | `logfile` | Alias for `log`. | n/a | | `log/redis_slowlog` | Alias for `redis`. | n/a | | `log/syslog` | Alias for `syslog`. | n/a | diff --git a/reference/fleet/fleet-agent-proxy-managed.md b/reference/fleet/fleet-agent-proxy-managed.md index a2c260a3d9..8125b4dba2 100644 --- a/reference/fleet/fleet-agent-proxy-managed.md +++ b/reference/fleet/fleet-agent-proxy-managed.md @@ -76,7 +76,7 @@ These steps describe how to set up {{fleet}} components to use a proxy. :::::{admonition} ::::{warning} - If agents are unable to reach the configured proxy server, they will not be able to write data to the output that has the proxy server configured. When changing the proxy of an output, please ensure that the affected agents all have connectivity to the proxy itself. + If agents are unable to reach the configured proxy server, they will not be able to write data to the output that has the proxy server configured. When changing the proxy of an output, ensure that the affected agents all have connectivity to the proxy itself. :::: @@ -99,7 +99,7 @@ These steps describe how to set up {{fleet}} components to use a proxy. :::::{admonition} ::::{warning} - If agents are unable to reach the configured proxy server, they will not be able to download binaries from the agent download source that has the proxy server configured. 
When changing the proxy of an agent binary source, please ensure that the affected agents all have connectivity to the proxy itself. + If agents are unable to reach the configured proxy server, they will not be able to download binaries from the agent download source that has the proxy server configured. When changing the proxy of an agent binary source, ensure that the affected agents all have connectivity to the proxy itself. :::: diff --git a/reference/fleet/index.md b/reference/fleet/index.md index 72bb3f9eba..2c177304a9 100644 --- a/reference/fleet/index.md +++ b/reference/fleet/index.md @@ -25,7 +25,7 @@ As the following diagram illustrates, {{agent}} can monitor the host where it's To learn about installation options, refer to [](/reference/fleet/install-elastic-agents.md). :::{note} -Using {{fleet}} and {{agent}} {{serverless-full}}? Please note these [restrictions](/reference/fleet/fleet-agent-serverless-restrictions.md). +Using {{fleet}} and {{agent}} {{serverless-full}}? Note these [restrictions](/reference/fleet/fleet-agent-serverless-restrictions.md). ::: :::{tip} diff --git a/reference/fleet/kafka-output-settings.md b/reference/fleet/kafka-output-settings.md index 2f1c380b7f..c7864d4c17 100644 --- a/reference/fleet/kafka-output-settings.md +++ b/reference/fleet/kafka-output-settings.md @@ -8,7 +8,7 @@ mapped_pages: Specify these settings to send data over a secure connection to Kafka. In the {{fleet}} [Output settings](/reference/fleet/fleet-settings.md#output-settings), make sure that the Kafka output type is selected. ::::{note} -If you plan to use {{ls}} to modify {{agent}} output data before it’s sent to Kafka, please refer to our [guidance](#kafka-output-settings-ls-warning) for doing so, further in on this page. +If you plan to use {{ls}} to modify {{agent}} output data before it’s sent to Kafka, refer to our [guidance](#kafka-output-settings-ls-warning) for doing so, further in on this page. :::: @@ -117,7 +117,7 @@ Configure timeout and buffer size values for the Kafka brokers. ## Kafka output and using {{ls}} to index data to {{es}} [kafka-output-settings-ls-warning] -If you are considering using {{ls}} to ship the data from `kafka` to {{es}}, please be aware the structure of the documents sent from {{agent}} to `kafka` must not be modified by {{ls}}. We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec in order to make sure the input doesn’t edit the fields and their contents. +If you are considering using {{ls}} to ship the data from `kafka` to {{es}}, be aware the structure of the documents sent from {{agent}} to `kafka` must not be modified by {{ls}}. We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec in order to make sure the input doesn’t edit the fields and their contents. The data streams setup by the integrations expect to receive events having the same structure and field names as they were sent directly from an {{agent}}. diff --git a/reference/fleet/kafka-output.md b/reference/fleet/kafka-output.md index 21dda248fa..f5a062d470 100644 --- a/reference/fleet/kafka-output.md +++ b/reference/fleet/kafka-output.md @@ -43,7 +43,7 @@ outputs: ## Kafka output and using {{ls}} to index data to {{es}} [_kafka_output_and_using_ls_to_index_data_to_es] -If you are considering using {{ls}} to ship the data from `kafka` to {{es}}, please be aware the structure of the documents sent from {{agent}} to `kafka` must not be modified by {{ls}}. 
We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec in order to make sure the input doesn’t edit the fields and their contents. +If you are considering using {{ls}} to ship the data from `kafka` to {{es}}, be aware the structure of the documents sent from {{agent}} to `kafka` must not be modified by {{ls}}. We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec in order to make sure the input doesn’t edit the fields and their contents. The data streams set up by the integrations expect to receive events having the same structure and field names as they were sent directly from an {{agent}}. diff --git a/reference/fleet/logstash-output.md b/reference/fleet/logstash-output.md index 4d8300994d..18cf11e7eb 100644 --- a/reference/fleet/logstash-output.md +++ b/reference/fleet/logstash-output.md @@ -25,7 +25,7 @@ outputs: To receive the events in {{ls}}, you also need to create a {{ls}} configuration pipeline. The {{ls}} configuration pipeline listens for incoming {{agent}} connections, processes received events, and then sends the events to {{es}}. -Please be aware that the structure of the documents sent from {{agent}} to {{ls}} must not be modified by the pipeline. We recommend that the pipeline doesn’t edit or remove the fields and their contents. Editing the structure of the documents coming from {{agent}} can prevent the {{es}} ingest pipelines associated to the integrations in use to work correctly. We cannot guarantee that the {{es}} ingest pipelines associated to the integrations using {agent} can work with missing or modified fields. +Be aware that the structure of the documents sent from {{agent}} to {{ls}} must not be modified by the pipeline. We recommend that the pipeline doesn’t edit or remove the fields and their contents. Editing the structure of the documents coming from {{agent}} can prevent the {{es}} ingest pipelines associated to the integrations in use to work correctly. We cannot guarantee that the {{es}} ingest pipelines associated to the integrations using {agent} can work with missing or modified fields. The following {{ls}} pipeline definition example configures a pipeline that listens on port `5044` for incoming {{agent}} connections and routes received events to {{es}}. diff --git a/reference/fleet/ls-output-settings.md b/reference/fleet/ls-output-settings.md index abb453a800..4bcb54ce2e 100644 --- a/reference/fleet/ls-output-settings.md +++ b/reference/fleet/ls-output-settings.md @@ -17,7 +17,7 @@ To receive the events in {{ls}}, you also need to create a {{ls}} configuration The following example configures a {{ls}} pipeline that listens on port `5044` for incoming {{agent}} connections and routes received events to {{es}}. -The {{ls}} pipeline definition below is an example. Please refer to the `Additional Logstash configuration required` steps when creating the {{ls}} output in the Fleet outputs page. +The {{ls}} pipeline definition below is an example. See the `Additional Logstash configuration required` steps when creating the {{ls}} output in the Fleet outputs page. ```yaml input { diff --git a/reference/fleet/manage-integrations.md b/reference/fleet/manage-integrations.md index 92c2ccb439..b566545d8a 100644 --- a/reference/fleet/manage-integrations.md +++ b/reference/fleet/manage-integrations.md @@ -23,7 +23,7 @@ Each integration comes prepackaged with assets that support all of your observab ::::{note} -Please be aware that some integrations may function differently across different spaces. 
Also, some might only work in the default space. We recommend reviewing the specific integration documentation for any space-related considerations. +Be aware that some integrations may function differently across different spaces. Also, some might only work in the default space. We recommend reviewing the specific integration documentation for any space-related considerations. :::: diff --git a/reference/fleet/migrate-auditbeat-to-agent.md b/reference/fleet/migrate-auditbeat-to-agent.md index 0ea3ad3efe..66bb143c71 100644 --- a/reference/fleet/migrate-auditbeat-to-agent.md +++ b/reference/fleet/migrate-auditbeat-to-agent.md @@ -22,7 +22,7 @@ The following table describes the integrations you can use instead of {{auditbea | If you use…​ | You can use this instead…​ | Notes | | --- | --- | --- | | [Auditd](beats://reference/auditbeat/auditbeat-module-auditd.md) module | [Auditd Manager](integration-docs://reference/auditd_manager/index.md) integration | This integration is a direct replacement of the module. You can port rules andconfiguration to this integration. Starting in {{stack}} 8.4, you can also set the`immutable` flag in the audit configuration. | -| [Auditd Logs](integration-docs://reference/auditd/index.md) integration | Use this integration if you don’t need to manage rules. It only parses logs fromthe audit daemon `auditd`. Please note that the events created by this integrationare different than the ones created by[Auditd Manager](integration-docs://reference/auditd_manager/index.md), since the latter merges allrelated messages in a single event while [Auditd Logs](integration-docs://reference/auditd/index.md)creates one event per message. | +| [Auditd Logs](integration-docs://reference/auditd/index.md) integration | Use this integration if you don’t need to manage rules. It only parses logs fromthe audit daemon `auditd`. Note that the events created by this integrationare different than the ones created by[Auditd Manager](integration-docs://reference/auditd_manager/index.md), since the latter merges allrelated messages in a single event while [Auditd Logs](integration-docs://reference/auditd/index.md)creates one event per message. | | [File Integrity](beats://reference/auditbeat/auditbeat-module-file_integrity.md) module | [File Integrity Monitoring](integration-docs://reference/fim/index.md) integration | This integration is a direct replacement of the module. It reports real-timeevents, but cannot report who made the changes. If you need to track thisinformation, use [{{elastic-defend}}](/solutions/security/configure-elastic-defend/install-elastic-defend.md) instead. | | [System](beats://reference/auditbeat/auditbeat-module-system.md) module | It depends…​ | There is not a single integration that collects all this information. | | [System.host](beats://reference/auditbeat/auditbeat-dataset-system-host.md) dataset | [Osquery](integration-docs://reference/osquery/index.md) or [Osquery Manager](integration-docs://reference/osquery_manager/index.md) integration | Schedule collection of information like:

* [system_info](https://www.osquery.io/schema/5.1.0/#system_info) for hostname, unique ID, and architecture
* [os_version](https://www.osquery.io/schema/5.1.0/#os_version)
* [interface_addresses](https://www.osquery.io/schema/5.1.0/#interface_addresses) for IPs and MACs
| diff --git a/reference/fleet/running-on-aks-managed-by-fleet.md b/reference/fleet/running-on-aks-managed-by-fleet.md index ad6cf537b8..345323a194 100644 --- a/reference/fleet/running-on-aks-managed-by-fleet.md +++ b/reference/fleet/running-on-aks-managed-by-fleet.md @@ -5,7 +5,7 @@ mapped_pages: # Run Elastic Agent on Azure AKS managed by Fleet [running-on-aks-managed-by-fleet] -Please follow the steps to run the {{agent}} on [Run {{agent}} on Kubernetes managed by {{fleet}}](/reference/fleet/running-on-kubernetes-managed-by-fleet.md) page. +To run {{agent}}, follow the steps on the [Run {{agent}} on Kubernetes managed by {{fleet}}](/reference/fleet/running-on-kubernetes-managed-by-fleet.md) page. ## Important notes: [_important_notes_4] diff --git a/reference/fleet/running-on-gke-managed-by-fleet.md b/reference/fleet/running-on-gke-managed-by-fleet.md index 1abe97c185..090245965d 100644 --- a/reference/fleet/running-on-gke-managed-by-fleet.md +++ b/reference/fleet/running-on-gke-managed-by-fleet.md @@ -5,7 +5,7 @@ mapped_pages: # Run Elastic Agent on GKE managed by Fleet [running-on-gke-managed-by-fleet] -Please follow the steps to run the {{agent}} on [Run {{agent}} on Kubernetes managed by {{fleet}}](/reference/fleet/running-on-kubernetes-managed-by-fleet.md) page. +To run {{agent}}, follow the steps on the [Run {{agent}} on Kubernetes managed by {{fleet}}](/reference/fleet/running-on-kubernetes-managed-by-fleet.md) page. ### Important notes: [_important_notes_2] diff --git a/reference/fleet/scaling-on-kubernetes.md b/reference/fleet/scaling-on-kubernetes.md index 0fd5651360..c864477c3c 100644 --- a/reference/fleet/scaling-on-kubernetes.md +++ b/reference/fleet/scaling-on-kubernetes.md @@ -5,7 +5,7 @@ mapped_pages: # Scaling Elastic Agent on Kubernetes [scaling-on-kubernetes] -For more information on how to deploy {{agent}} on {{k8s}}, please review these pages: +For more information on how to deploy {{agent}} on {{k8s}}, review these pages: * [Run {{agent}} on Kubernetes managed by {{fleet}}](/reference/fleet/running-on-kubernetes-managed-by-fleet.md). * [Run {{agent}} Standalone on Kubernetes](/reference/fleet/running-on-kubernetes-standalone.md). diff --git a/reference/observability/fields-and-object-schemas.md b/reference/observability/fields-and-object-schemas.md index 1391f63c89..90dad53d0f 100644 --- a/reference/observability/fields-and-object-schemas.md +++ b/reference/observability/fields-and-object-schemas.md @@ -20,7 +20,7 @@ This reference covers {{infrastructure-app}} fields. ## Infrastructure app fields [metrics-app-fields] -This section lists the required fields the {{infrastructure-app}} uses to display data. Please note that some of the fields listed are not [ECS fields](ecs://reference/index.md#_what_is_ecs). +This section lists the required fields the {{infrastructure-app}} uses to display data. Note that some of the fields listed are not [ECS fields](ecs://reference/index.md#_what_is_ecs). 
### Additional field details [_additional_field_details] diff --git a/solutions/observability/apm/collect-metrics.md b/solutions/observability/apm/collect-metrics.md index c17884d55c..f4d0b70a58 100644 --- a/solutions/observability/apm/collect-metrics.md +++ b/solutions/observability/apm/collect-metrics.md @@ -10,7 +10,7 @@ applies_to: # Collect metrics [apm-open-telemetry-collect-metrics] ::::{important} -When collecting metrics, please note that the [`DoubleValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/DoubleValueRecorder.md) and [`LongValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/LongValueObserver.md) metrics are not yet supported. +When collecting metrics, note that the [`DoubleValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/DoubleValueRecorder.md) and [`LongValueRecorder`](https://www.javadoc.io/doc/io.opentelemetry/opentelemetry-api/latest/io/opentelemetry/api/metrics/LongValueObserver.md) metrics are not yet supported. :::: Here’s an example of how to capture business metrics from a Java application. diff --git a/solutions/observability/apm/configure-kibana-endpoint.md b/solutions/observability/apm/configure-kibana-endpoint.md index 56fa16d6fd..f4ce128fde 100644 --- a/solutions/observability/apm/configure-kibana-endpoint.md +++ b/solutions/observability/apm/configure-kibana-endpoint.md @@ -13,7 +13,7 @@ applies_to: You must configure the {{kib}} endpoint when running the APM Server binary with a non-{{es}} output. Configuring the {{kib}} endpoint allows the APM Server to communicate with {{kib}} and ensure that the APM integration was properly set up. It is also required for APM agent configuration when using an output other than {{es}}. -For all other use-cases, starting in version 8.7.0, APM agent configurations is fetched directly from {{es}}. Configuring and enabling the {{kib}} endpoint is only used as a fallback. Please see [APM agent central configuration](/solutions/observability/apm/configure-apm-agent-central-configuration.md) instead. +For all other use cases, starting in version 8.7.0, APM agent configurations are fetched directly from {{es}}. Configuring and enabling the {{kib}} endpoint is only used as a fallback. See [APM agent central configuration](/solutions/observability/apm/configure-apm-agent-central-configuration.md) instead. :::: diff --git a/solutions/observability/apm/configure-project-paths.md b/solutions/observability/apm/configure-project-paths.md index 0890e3ca3c..cff88a2d12 100644 --- a/solutions/observability/apm/configure-project-paths.md +++ b/solutions/observability/apm/configure-project-paths.md @@ -17,7 +17,7 @@ This documentation is only relevant for APM Server binary users. Fleet-managed p The `path` section of the `apm-server.yml` config file contains configuration options that define where APM Server looks for its files. For example, APM Server looks for the {{es}} template file in the configuration path and writes log files in the logs path. -Please see the [Installation layout](/solutions/observability/apm/installation-layout.md) section for more details. +See the [Installation layout](/solutions/observability/apm/installation-layout.md) section for more details. 
Here is an example configuration: diff --git a/solutions/observability/apm/transaction-sampling.md b/solutions/observability/apm/transaction-sampling.md index 1eefad0e1e..a6d43e144f 100644 --- a/solutions/observability/apm/transaction-sampling.md +++ b/solutions/observability/apm/transaction-sampling.md @@ -266,7 +266,7 @@ Enable tail-based sampling with [Enable tail-based sampling](/solutions/observab Trace events are matched to policies in the order specified. Each policy list must conclude with a default policy — one that only specifies a sample rate. This default policy is used to catch remaining trace events that don’t match a stricter policy. Requiring this default policy ensures that traces are only dropped intentionally. If you enable tail-based sampling and send a transaction that does not match any of the policies, APM Server will reject the transaction with the error `no matching policy`. ::::{important} -Please note that from version `9.0.0` APM Server has an unlimited storage limit, but will stop writing when the disk where the database resides reaches 80% usage. Due to how the limit is calculated and enforced, the actual disk space may still grow slightly over this disk usage based limit, or any configured storage limit. +Note that from version `9.0.0` APM Server has an unlimited storage limit, but will stop writing when the disk where the database resides reaches 80% usage. Due to how the limit is calculated and enforced, the actual disk space may still grow slightly over this disk usage based limit, or any configured storage limit. :::: ### Example configuration [_example_configuration] diff --git a/solutions/observability/apm/upstream-opentelemetry-collectors-language-sdks.md b/solutions/observability/apm/upstream-opentelemetry-collectors-language-sdks.md index 0b3d894aed..065fec81d2 100644 --- a/solutions/observability/apm/upstream-opentelemetry-collectors-language-sdks.md +++ b/solutions/observability/apm/upstream-opentelemetry-collectors-language-sdks.md @@ -185,7 +185,7 @@ java -javaagent:/path/to/opentelemetry-javaagent-all.jar \ For information on how to format an API key, see [API keys](/solutions/observability/apm/api-keys.md). - Please note the required space between `Bearer` and `an_apm_secret_token`, and `ApiKey` and `an_api_key`. + Note the required space between `Bearer` and `an_apm_secret_token`, and `ApiKey` and `an_api_key`. ::::{note} If you are using a version of the Python OpenTelemetry agent *before* 1.27.0, the content of the header *must* be URL-encoded. You can use the Python standard library’s `urllib.parse.quote` function to encode the content of the header. diff --git a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-beats.md b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-beats.md index dfe042eaaf..2afe6b11e9 100644 --- a/solutions/observability/cloud/monitor-amazon-web-services-aws-with-beats.md +++ b/solutions/observability/cloud/monitor-amazon-web-services-aws-with-beats.md @@ -403,7 +403,7 @@ This gives you an overview of how your S3 buckets are being accessed. To monitor your AWS infrastructure you will need to first make sure your infrastructure data are being shipped to CloudWatch. To ship the data to {{es}} we are going to use the AWS module from {{metricbeat}}. This module periodically fetches monitoring metrics from AWS CloudWatch using **GetMetricData** API for AWS services. ::::{important} -Extra AWS charges on CloudWatch API requests will be generated by this module. 
Please see [AWS API requests](beats://reference/metricbeat/metricbeat-module-aws.md#aws-api-requests) for more details. +Extra AWS charges on CloudWatch API requests will be generated by this module. See [AWS API requests](beats://reference/metricbeat/metricbeat-module-aws.md#aws-api-requests) for more details. :::: diff --git a/solutions/observability/cloud/monitor-google-cloud-platform-gcp.md b/solutions/observability/cloud/monitor-google-cloud-platform-gcp.md index bbb7263925..2649d0b064 100644 --- a/solutions/observability/cloud/monitor-google-cloud-platform-gcp.md +++ b/solutions/observability/cloud/monitor-google-cloud-platform-gcp.md @@ -257,7 +257,7 @@ Now that the output is working, you are going to set up the input (GCP). To collect metrics from Google Cloud Platform, use the [Google Cloud Platform](beats://reference/metricbeat/metricbeat-module-gcp.md) module. This module periodically fetches monitoring metrics from Google Cloud Platform using [Stackdriver Monitoring API](https://cloud.google.com/monitoring/api/metrics_gcp) for Google Cloud Platform services. ::::{warning} -Extra GCP charges on Stackdriver Monitoring API requests may be generated by this module. Please see [rough estimation of the number of API calls](beats://reference/metricbeat/metricbeat-module-gcp.md#gcp-api-requests) for more details. +Extra GCP charges on Stackdriver Monitoring API requests may be generated by this module. See [rough estimation of the number of API calls](beats://reference/metricbeat/metricbeat-module-gcp.md#gcp-api-requests) for more details. :::: diff --git a/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md b/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md index 8afc45fad4..10d2828f7f 100644 --- a/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md +++ b/solutions/observability/cloud/monitor-microsoft-azure-with-beats.md @@ -335,7 +335,7 @@ Now that the output is working, you are going to set up the input (Azure). To collect metrics from Microsoft Azure, use the [{{metricbeat}} Azure module](beats://reference/metricbeat/metricbeat-module-azure.md). This module periodically fetches monitoring metrics from Microsoft Azure using the [Azure Monitor REST API](https://docs.microsoft.com/en-us/rest/api/monitor/). ::::{warning} -Extra Azure charges on metric queries my be generated by this module. Please see [additional notes about metrics and costs](beats://reference/metricbeat/metricbeat-module-azure.md#azure-api-cost) for more details. +Extra Azure charges on metric queries may be generated by this module. See [additional notes about metrics and costs](beats://reference/metricbeat/metricbeat-module-azure.md#azure-api-cost) for more details. :::: diff --git a/solutions/search/semantic-search/semantic-search-inference.md b/solutions/search/semantic-search/semantic-search-inference.md index 5e435893ff..29e2a45911 100644 --- a/solutions/search/semantic-search/semantic-search-inference.md +++ b/solutions/search/semantic-search/semantic-search-inference.md @@ -1458,7 +1458,7 @@ As a result, you receive the top 10 documents that are closest in meaning to the "_score": 0.86815524, "_source": { "id": 3041038, - "content": "For example, the cost of the fuel could be 96.9, the amount could be 10 pounds, and the distance covered could be 80 miles. 
To convert between Litres per 100KM and Miles Per Gallon, please provide a value and click on the required button.o calculate how much fuel you'll need for a given journey, please provide the distance in miles you will be covering on your journey, and the estimated MPG of your vehicle. To work out what MPG you are really getting, please provide the cost of the fuel, how much you spent on the fuel, and how far it took you." + "content": "For example, the cost of the fuel could be 96.9, the amount could be 10 pounds, and the distance covered could be 80 miles. To convert between Litres per 100KM and Miles Per Gallon, provide a value and click on the required button.o calculate how much fuel you'll need for a given journey, provide the distance in miles you will be covering on your journey, and the estimated MPG of your vehicle. To work out what MPG you are really getting, provide the cost of the fuel, how much you spent on the fuel, and how far it took you." } }, { diff --git a/solutions/search/site-or-app/search-ui.md b/solutions/search/site-or-app/search-ui.md index ca34e89b30..b6caf5827e 100644 --- a/solutions/search/site-or-app/search-ui.md +++ b/solutions/search/site-or-app/search-ui.md @@ -113,7 +113,7 @@ Read the [Elasticsearch Connector](search-ui://reference/api-connectors-elastics ### Where do I report issues with the Search UI? [overview-where-do-i-report-issues-with-the-search-ui] -If something is not working as expected, please open an [issue](https://github.com/elastic/search-ui/issues/new). +If something is not working as expected, open an [issue](https://github.com/elastic/search-ui/issues/new). ### Where can I go to get help? [overview-where-can-i-go-to-get-help] @@ -126,12 +126,12 @@ The Enterprise Search team at Elastic maintains this library and are happy to he We welcome contributors to the project. Before you begin, a couple notes…​ * Read the [Search UI Contributor’s Guide](https://github.com/elastic/search-ui/blob/main/CONTRIBUTING.md). -* Prior to opening a pull request, please: +* Prior to opening a pull request: * Create an issue to [discuss the scope of your proposal](https://github.com/elastic/search-ui/issues). * Sign the [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once. -* Please write simple code and concise documentation, when appropriate. +* Write simple code and concise documentation, when appropriate. ## License 📗 [overview-license] diff --git a/solutions/search/vector/knn.md b/solutions/search/vector/knn.md index 1b0af79994..ef7b5ed460 100644 --- a/solutions/search/vector/knn.md +++ b/solutions/search/vector/knn.md @@ -55,7 +55,7 @@ Exact, brute-force kNN guarantees accurate results but doesn’t scale well with ## Approximate kNN [approximate-knn] ::::{warning} -Compared to other types of search, approximate kNN search has specific resource requirements. In particular, all vector data must fit in the node’s page cache for it to be efficient. Please consult the [approximate kNN search tuning guide](/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md) for important notes on configuration and sizing. +Compared to other types of search, approximate kNN search has specific resource requirements. 
In particular, all vector data must fit in the node’s page cache for it to be efficient. See the [approximate kNN search tuning guide](/deploy-manage/production-guidance/optimize-performance/approximate-knn-search.md) for important notes on configuration and sizing. :::: To run an approximate kNN search, use the [`knn` option](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search#operation-search-body-application-json-knn) to search one or more `dense_vector` fields with indexing enabled. diff --git a/solutions/security/ai/ai-assistant.md b/solutions/security/ai/ai-assistant.md index 84876a5605..72805b7836 100644 --- a/solutions/security/ai/ai-assistant.md +++ b/solutions/security/ai/ai-assistant.md @@ -38,7 +38,7 @@ The Elastic AI Assistant is designed to enhance your analysis with smart dialogu Elastic does not store or examine prompts or results used by AI Assistant, or use this data for model training. This includes anything you send the model, such as alert or event data, detection rule configurations, queries, and prompts. However, any data you provide to AI Assistant will be processed by the third-party large language model (LLM) provider you connected to as part of AI Assistant setup. -Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Please exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. +Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. ::::{note} Elastic can automatically anonymize event data that you provide to AI Assistant as context. To learn more, refer to [Configure AI Assistant](/solutions/security/ai/ai-assistant.md#configure-ai-assistant). @@ -174,7 +174,7 @@ The **Knowledge base** tab of the **Security AI settings** page allows you to en Elastic AI Assistant allows you to take full advantage of the {{elastic-sec}} platform to improve your security operations. It can help you write an {{esql}} query for a particular use case, or answer general questions about how to use the platform. Its ability to assist you depends on the specificity and detail of your questions. The more context and detail you provide, the more tailored and useful its responses will be. -To maximize its usefulness, consider using more detailed prompts or asking for additional information. For instance, after asking for an {{esql}} query example, you could ask a follow-up question like, “Could you give me some other examples?” You can also ask for clarification or further exposition, for example "Please provide comments explaining the query you just gave." 
+To maximize its usefulness, consider using more detailed prompts or asking for additional information. For instance, after asking for an {{esql}} query example, you could ask a follow-up question like, “Could you give me some other examples?” You can also ask for clarification or further exposition, for example "Provide comments explaining the query you just gave." In addition to practical advice, AI Assistant can offer conceptual advice, tips, and best practices for enhancing your security measures. You can ask it, for example: diff --git a/solutions/security/detect-and-alert/create-detection-rule.md b/solutions/security/detect-and-alert/create-detection-rule.md index 07a61dd56f..1b5e4c1da9 100644 --- a/solutions/security/detect-and-alert/create-detection-rule.md +++ b/solutions/security/detect-and-alert/create-detection-rule.md @@ -520,7 +520,7 @@ When configuring an {{esql}} rule’s **[Custom highlighted fields](/solutions/s ::: ::::{note} - For threshold rules, not all source event values can be used for overrides; only the fields that were aggregated over (the `Group by` fields) will contain data. Please also note that overrides are not supported for event correlation rules. + For threshold rules, not all source event values can be used for overrides; only the fields that were aggregated over (the `Group by` fields) will contain data. Also note that overrides are not supported for event correlation rules. :::: 5. **Default risk score**: A numerical value between 0 and 100 that indicates the risk of events detected by the rule. This setting changes to a default value when you change the **Severity** level, but you can adjust the risk score as needed. General guidelines are: diff --git a/troubleshoot/elasticsearch/add-tier.md b/troubleshoot/elasticsearch/add-tier.md index b2f7746777..a2bc83636a 100644 --- a/troubleshoot/elasticsearch/add-tier.md +++ b/troubleshoot/elasticsearch/add-tier.md @@ -32,7 +32,7 @@ In order to get the shards assigned we need enable a new tier in the deployment. 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/allow-all-cluster-allocation.md b/troubleshoot/elasticsearch/allow-all-cluster-allocation.md index e8f8a972fc..90bb3a2bfb 100644 --- a/troubleshoot/elasticsearch/allow-all-cluster-allocation.md +++ b/troubleshoot/elasticsearch/allow-all-cluster-allocation.md @@ -32,7 +32,7 @@ We’ll achieve this by inspecting the system-wide `cluster.routing.allocation.e 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). 
If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/allow-all-index-allocation.md b/troubleshoot/elasticsearch/allow-all-index-allocation.md index aa06c14c06..9d9d8b7d62 100644 --- a/troubleshoot/elasticsearch/allow-all-index-allocation.md +++ b/troubleshoot/elasticsearch/allow-all-index-allocation.md @@ -33,7 +33,7 @@ In order to get the shards assigned we’ll need to change the value of the [con 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/decrease-disk-usage-data-node.md b/troubleshoot/elasticsearch/decrease-disk-usage-data-node.md index 282126f70c..152df48e5b 100644 --- a/troubleshoot/elasticsearch/decrease-disk-usage-data-node.md +++ b/troubleshoot/elasticsearch/decrease-disk-usage-data-node.md @@ -29,7 +29,7 @@ Reducing the replicas of an index can potentially reduce search throughput and d 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Stack Management > Index Management**. @@ -98,7 +98,7 @@ In order to estimate how many replicas need to be removed, first you need to est 4. The next step is to list all the indices and choose which replicas to reduce. ::::{note} - The following command orders the indices with descending number of replicas and primary store size. 
We do this to help you choose which replicas to reduce under the assumption that the more replicas you have, the smaller the risk if you remove a copy, and the bigger the replica, the more space will be released. This does not take into consideration any functional requirements, so see it as a mere suggestion. :::: diff --git a/troubleshoot/elasticsearch/diagnose-unassigned-shards.md b/troubleshoot/elasticsearch/diagnose-unassigned-shards.md index 4da5c3c0a2..87c32a44b8 100644 --- a/troubleshoot/elasticsearch/diagnose-unassigned-shards.md +++ b/troubleshoot/elasticsearch/diagnose-unassigned-shards.md @@ -30,7 +30,7 @@ In order to diagnose the unassigned shards, follow the next steps: 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. @@ -134,7 +134,7 @@ In order to diagnose the unassigned shards, follow the next steps: 7. Change the settings using the [update index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) and [cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) APIs to the correct values in order to allow the index to be allocated. -For more guidance on fixing the most common causes for unassinged shards please follow [this guide](red-yellow-cluster-status.md#fix-red-yellow-cluster-status) or contact [Elastic Support](https://support.elastic.co). +For more guidance on fixing the most common causes for unassigned shards, follow [this guide](red-yellow-cluster-status.md#fix-red-yellow-cluster-status) or contact [Elastic Support](https://support.elastic.co). :::::: ::::::{tab-item} Self-managed @@ -234,7 +234,7 @@ In order to diagnose the unassigned shards follow the next steps: 4. Change the settings using the [update index settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) and [cluster update settings](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) APIs to the correct values in order to allow the index to be allocated. -For more guidance on fixing the most common causes for unassinged shards please follow [this guide](red-yellow-cluster-status.md#fix-red-yellow-cluster-status). 
+For more guidance on fixing the most common causes for unassigned shards, follow [this guide](red-yellow-cluster-status.md#fix-red-yellow-cluster-status). :::::: ::::::: diff --git a/troubleshoot/elasticsearch/diagnosing-corrupted-repositories.md b/troubleshoot/elasticsearch/diagnosing-corrupted-repositories.md index 15ddc7fdcf..835b2c553f 100644 --- a/troubleshoot/elasticsearch/diagnosing-corrupted-repositories.md +++ b/troubleshoot/elasticsearch/diagnosing-corrupted-repositories.md @@ -28,7 +28,7 @@ First mark the repository as read-only on the secondary deployments: 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Stack Management > Snapshot and Restore > Repositories**. diff --git a/troubleshoot/elasticsearch/elasticsearch-client-java-api-client/missing-required-property.md b/troubleshoot/elasticsearch/elasticsearch-client-java-api-client/missing-required-property.md index d2989bf4d0..36c9dfdf18 100644 --- a/troubleshoot/elasticsearch/elasticsearch-client-java-api-client/missing-required-property.md +++ b/troubleshoot/elasticsearch/elasticsearch-client-java-api-client/missing-required-property.md @@ -12,11 +12,11 @@ When an API object is built and a required property hasn’t been set, a `Missin However, there may be bugs in the [Elasticsearch API specification](https://github.com/elastic/elasticsearch-specification) where a response object’s property is incorrectly required, leading to a `MissingRequiredPropertyException` when deserializing a response. If this happens, here’s how you can work around it: * Make sure you use the latest release of the Java API Client. The issue may already have been fixed. -* If the issue is still present on the latest version, [open an issue](https://github.com/elastic/elasticsearch-java/issues/new/choose) so that we can fix it in the next release. Please help us to improve the Java API Client. +* If the issue is still present on the latest version, [open an issue](https://github.com/elastic/elasticsearch-java/issues/new/choose) so that we can fix it in the next release. Help us to improve the Java API Client. * Temporarily disable required property checks for the offending request: ::::{warning} -This is a workaround. Do not consider this as a permanent solution, and please [open an issue](https://github.com/elastic/elasticsearch-java/issues/new/choose) so that the problem can be fixed in a future release. +This is a workaround. Do not consider this as a permanent solution, and [open an issue](https://github.com/elastic/elasticsearch-java/issues/new/choose) so that the problem can be fixed in a future release. 
:::: diff --git a/troubleshoot/elasticsearch/increase-capacity-data-node.md b/troubleshoot/elasticsearch/increase-capacity-data-node.md index dcf3a796eb..dacf001e6b 100644 --- a/troubleshoot/elasticsearch/increase-capacity-data-node.md +++ b/troubleshoot/elasticsearch/increase-capacity-data-node.md @@ -20,7 +20,7 @@ In order to increase the disk capacity of the data nodes in your cluster: 1. Log in to the [{{ecloud}} console](https://cloud.elastic.co?page=docs&placement=docs-body). 2. On the **Hosted deployments** panel, click the gear under the `Manage deployment` column that corresponds to the name of your deployment. -3. If autoscaling is available but not enabled, please enable it. You can do this by clicking the button `Enable autoscaling` on a banner like the one below: +3. If autoscaling is available but not enabled, enable it. You can do this by clicking the button `Enable autoscaling` on a banner like the one below: :::{image} /troubleshoot/images/elasticsearch-reference-autoscaling_banner.png :alt: Autoscaling banner @@ -34,7 +34,7 @@ In order to increase the disk capacity of the data nodes in your cluster: :screenshot: ::: -4. If autoscaling has succeeded the cluster should return to `healthy` status. If the cluster is still out of disk, please check if autoscaling has reached its limits. You will be notified about this by the following banner: +4. If autoscaling has succeeded the cluster should return to `healthy` status. If the cluster is still out of disk, check if autoscaling has reached its limits. You will be notified about this by the following banner: :::{image} /troubleshoot/images/elasticsearch-reference-autoscaling_limits_banner.png :alt: Autoscaling banner diff --git a/troubleshoot/elasticsearch/increase-cluster-shard-limit.md b/troubleshoot/elasticsearch/increase-cluster-shard-limit.md index 5cb209c900..9e05b8104c 100644 --- a/troubleshoot/elasticsearch/increase-cluster-shard-limit.md +++ b/troubleshoot/elasticsearch/increase-cluster-shard-limit.md @@ -32,7 +32,7 @@ In order to get the shards assigned we’ll need to increase the number of shard 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/increase-shard-limit.md b/troubleshoot/elasticsearch/increase-shard-limit.md index 28d7f49537..c92fcd7a3f 100644 --- a/troubleshoot/elasticsearch/increase-shard-limit.md +++ b/troubleshoot/elasticsearch/increase-shard-limit.md @@ -32,7 +32,7 @@ In order to get the shards assigned we’ll need to increase the number of shard 2. On the **Hosted deployments** panel, click the name of your deployment. 
::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/increase-tier-capacity.md b/troubleshoot/elasticsearch/increase-tier-capacity.md index e575956bd6..da23936a4d 100644 --- a/troubleshoot/elasticsearch/increase-tier-capacity.md +++ b/troubleshoot/elasticsearch/increase-tier-capacity.md @@ -30,7 +30,7 @@ One way to get the replica shards assigned is to add an availability zone. This 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/mapping-explosion.md b/troubleshoot/elasticsearch/mapping-explosion.md index 55cdba45d5..8d66c61103 100644 --- a/troubleshoot/elasticsearch/mapping-explosion.md +++ b/troubleshoot/elasticsearch/mapping-explosion.md @@ -32,7 +32,7 @@ Mapping explosion may surface as the following performance symptoms: If your index mapped fields expect to contain a large, arbitrary set of keys, you may instead consider: * Setting [`index.mapping.total_fields.ignore_dynamic_beyond_limit`](elasticsearch://reference/elasticsearch/index-settings/mapping-limit.md) to `true`. Instead of rejecting documents that exceed the field limit, this will ignore dynamic fields once the limit is reached. -* Using the [flattened](elasticsearch://reference/elasticsearch/mapping-reference/flattened.md) data type. Please note, however, that flattened objects is [not fully supported in {{kib}}](https://github.com/elastic/kibana/issues/25820) yet. For example, this could apply to sub-mappings like { `host.name` , `host.os`, `host.version` }. Desired fields are still accessed by [runtime fields](../../manage-data/data-store/mapping/define-runtime-fields-in-search-request.md). +* Using the [flattened](elasticsearch://reference/elasticsearch/mapping-reference/flattened.md) data type. Note, however, that flattened objects is [not fully supported in {{kib}}](https://github.com/elastic/kibana/issues/25820) yet. For example, this could apply to sub-mappings like { `host.name` , `host.os`, `host.version` }. 
Desired fields are still accessed by [runtime fields](../../manage-data/data-store/mapping/define-runtime-fields-in-search-request.md). * Disable [dynamic mappings](../../manage-data/data-store/mapping.md). This cannot effect current index mapping, but can apply going forward via an [index template](../../manage-data/data-store/templates.md). Modifying to the [nested](elasticsearch://reference/elasticsearch/mapping-reference/nested.md) data type would not resolve the core issue. diff --git a/troubleshoot/elasticsearch/remote-clusters.md b/troubleshoot/elasticsearch/remote-clusters.md index 5161a3c1f9..adfe6142c0 100644 --- a/troubleshoot/elasticsearch/remote-clusters.md +++ b/troubleshoot/elasticsearch/remote-clusters.md @@ -298,10 +298,10 @@ The remote cluster logs `client did not trust this server's certificate`: Even if TLS verification is not an issue, the connection fails due to missing credentials. -The local cluster logs `Please ensure you have configured remote cluster credentials`: +The local cluster logs `Please ensure you have configured remote cluster credentials`: ```txt -Caused by: java.lang.IllegalArgumentException: Cross cluster requests through the dedicated remote cluster server port require transport header [_cross_cluster_access_credentials] but none found. **Please ensure you have configured remote cluster credentials** on the cluster originating the request. +Caused by: java.lang.IllegalArgumentException: Cross cluster requests through the dedicated remote cluster server port require transport header [_cross_cluster_access_credentials] but none found. **Please ensure you have configured remote cluster credentials** on the cluster originating the request. ``` This does not show up in the logs of the remote cluster. diff --git a/troubleshoot/elasticsearch/repeated-snapshot-failures.md b/troubleshoot/elasticsearch/repeated-snapshot-failures.md index 9335b9729c..cd55f29bc9 100644 --- a/troubleshoot/elasticsearch/repeated-snapshot-failures.md +++ b/troubleshoot/elasticsearch/repeated-snapshot-failures.md @@ -30,7 +30,7 @@ In order to check the status of failing {{slm}} policies we need to go to Kibana 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/restore-from-snapshot.md b/troubleshoot/elasticsearch/restore-from-snapshot.md index 8d6ab61fbe..96ede45ade 100644 --- a/troubleshoot/elasticsearch/restore-from-snapshot.md +++ b/troubleshoot/elasticsearch/restore-from-snapshot.md @@ -15,7 +15,7 @@ mapped_pages: Elasticsearch is using snapshots to store a copy of your data outside a cluster. You can restore a snapshot to recover indices and data streams for which there are no copies of the shards in the cluster. 
This can happen if the data (indices or data streams) was deleted or if the cluster membership changed and the current nodes in the system do not contain a copy of the data anymore. ::::{important} -Restoring the missing data requires you to have a backup of the affected indices and data streams that is up-to-date enough for your use case. Please do not proceed without confirming this. +Restoring the missing data requires you to have a backup of the affected indices and data streams that is up-to-date enough for your use case. Don't proceed without confirming this. :::: @@ -30,7 +30,7 @@ In order to restore the indices and data streams that are missing data: 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/security/trb-security-kerberos.md b/troubleshoot/elasticsearch/security/trb-security-kerberos.md index 8559b5cd95..4ae0ddee55 100644 --- a/troubleshoot/elasticsearch/security/trb-security-kerberos.md +++ b/troubleshoot/elasticsearch/security/trb-security-kerberos.md @@ -22,16 +22,16 @@ mapped_pages: `Failure unspecified at GSS-API level (Mechanism level: Checksum failed)` : When you see this error message on the HTTP client side, then it may be related to an incorrect password. -When you see this error message in the {{es}} server logs, then it may be related to the {{es}} service keytab. The keytab file is present but it failed to log in as the user. Please check the keytab expiry. Also check whether the keytab contain up-to-date credentials; if not, replace them. +When you see this error message in the {{es}} server logs, then it may be related to the {{es}} service keytab. The keytab file is present but it failed to log in as the user. Check the keytab expiry. Also check whether the keytab contains up-to-date credentials; if not, replace them. -You can use tools like `klist` or `ktab` to list principals inside the keytab and validate them. You can use `kinit` to see if you can acquire initial tickets using the keytab. Please check the tools and their documentation in your Kerberos environment. +You can use tools like `klist` or `ktab` to list principals inside the keytab and validate them. You can use `kinit` to see if you can acquire initial tickets using the keytab. Check the tools and their documentation in your Kerberos environment. -Kerberos depends on proper hostname resolution, so please check your DNS infrastructure. Incorrect DNS setup, DNS SRV records or configuration for KDC servers in `krb5.conf` can cause problems with hostname resolution. +Kerberos depends on proper hostname resolution, so check your DNS infrastructure. Incorrect DNS setup, DNS SRV records or configuration for KDC servers in `krb5.conf` can cause problems with hostname resolution.
`Failure unspecified at GSS-API level (Mechanism level: Request is a replay (34))` `Failure unspecified at GSS-API level (Mechanism level: Clock skew too great (37))` -: To prevent replay attacks, Kerberos V5 sets a maximum tolerance for computer clock synchronization and it is typically 5 minutes. Please check whether the time on the machines within the domain is in sync. +: To prevent replay attacks, Kerberos V5 sets a maximum tolerance for computer clock synchronization and it is typically 5 minutes. Check whether the time on the machines within the domain is in sync. `gss_init_sec_context() failed: An unsupported mechanism was requested` diff --git a/troubleshoot/elasticsearch/security/trb-security-saml.md b/troubleshoot/elasticsearch/security/trb-security-saml.md index 223504b4f6..d5ff2e4e85 100644 --- a/troubleshoot/elasticsearch/security/trb-security-saml.md +++ b/troubleshoot/elasticsearch/security/trb-security-saml.md @@ -180,7 +180,7 @@ Some of the common SAML problems are shown below with tips on how to resolve the To resolve this issue, ensure that in your {{kib}} configuration `xpack.security.sameSiteCookies` is not set to `Strict`. Depending on your configuration, you may be able to rely on the default value or explicitly set the value to `None`. - For further information, please read [MDN SameSite cookies](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite) + For further information, read [MDN SameSite cookies](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite) If you serve multiple {{kib}} installations behind a load balancer make sure to use the [same security configuration](/deploy-manage/production-guidance/kibana-load-balance-traffic.md#load-balancing-kibana) for all installations. diff --git a/troubleshoot/elasticsearch/security/trb-security-setup.md b/troubleshoot/elasticsearch/security/trb-security-setup.md index 67d414db13..edbd9a81d0 100644 --- a/troubleshoot/elasticsearch/security/trb-security-setup.md +++ b/troubleshoot/elasticsearch/security/trb-security-setup.md @@ -37,7 +37,7 @@ The [elasticsearch-setup-passwords command](elasticsearch://reference/elasticsea PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target - Please check the elasticsearch SSL settings under + Please check the elasticsearch SSL settings under xpack.security.http.ssl. ... ERROR: Failed to establish SSL connection to elasticsearch at @@ -52,7 +52,7 @@ The [elasticsearch-setup-passwords command](elasticsearch://reference/elasticsea failed: java.security.cert.CertificateException: No subject alternative DNS name matching elasticsearch.example.com found. - Please check the elasticsearch SSL settings under + Please check the elasticsearch SSL settings under xpack.security.http.ssl. ... ERROR: Failed to establish SSL connection to elasticsearch at diff --git a/troubleshoot/elasticsearch/security/trb-security-ssl.md b/troubleshoot/elasticsearch/security/trb-security-ssl.md index 1ce9b64fd7..2842e5d0f5 100644 --- a/troubleshoot/elasticsearch/security/trb-security-ssl.md +++ b/troubleshoot/elasticsearch/security/trb-security-ssl.md @@ -20,23 +20,23 @@ mapped_pages: **Resolution:** `WARN: received plaintext http traffic on a https channel, closing connection` -: Indicates that there was an incoming plaintext http request. This typically occurs when an external applications attempts to make an unencrypted call to the REST interface.
Please ensure that all applications are using `https` when calling the REST interface with SSL enabled. +: Indicates that there was an incoming plaintext http request. This typically occurs when an external application attempts to make an unencrypted call to the REST interface. Make sure that all applications are using `https` when calling the REST interface with SSL enabled. `org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record:` -: Indicates that there was incoming plaintext traffic on an SSL connection. This typically occurs when a node is not configured to use encrypted communication and tries to connect to nodes that are using encrypted communication. Please verify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. +: Indicates that there was incoming plaintext traffic on an SSL connection. This typically occurs when a node is not configured to use encrypted communication and tries to connect to nodes that are using encrypted communication. Verify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. For more information about this setting, see [Security settings](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md). `java.io.StreamCorruptedException: invalid internal transport message format, got` -: Indicates an issue with data received on the transport interface in an unknown format. This can happen when a node with encrypted communication enabled connects to a node that has encrypted communication disabled. Please verify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. +: Indicates an issue with data received on the transport interface in an unknown format. This can happen when a node with encrypted communication enabled connects to a node that has encrypted communication disabled. Verify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. For more information about this setting, see [Security settings](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md). `java.lang.IllegalArgumentException: empty text` -: This exception is typically seen when a `https` request is made to a node that is not using `https`. If `https` is desired, please ensure the following setting is in `elasticsearch.yml`: +: This exception is typically seen when an `https` request is made to a node that is not using `https`. If `https` is desired, ensure the following setting is in `elasticsearch.yml`: ```yaml xpack.security.http.ssl.enabled: true diff --git a/troubleshoot/elasticsearch/start-ilm.md b/troubleshoot/elasticsearch/start-ilm.md index b30bdaed50..bb6ce85641 100644 --- a/troubleshoot/elasticsearch/start-ilm.md +++ b/troubleshoot/elasticsearch/start-ilm.md @@ -41,7 +41,7 @@ In order to start {{ilm}} we need to go to Kibana and execute the [start command 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co).
If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. @@ -129,7 +129,7 @@ In order to start {{slm}} we need to go to Kibana and execute the [start command 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md b/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md index 2d115562fb..44b7f8c32f 100644 --- a/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md +++ b/troubleshoot/elasticsearch/troubleshoot-migrate-to-tiers.md @@ -32,7 +32,7 @@ In order to get the shards assigned we need to call the [migrate to data tiers r 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/elasticsearch/troubleshooting-shards-capacity-issues.md b/troubleshoot/elasticsearch/troubleshooting-shards-capacity-issues.md index 2c96a82087..fa2e0d6cfa 100644 --- a/troubleshoot/elasticsearch/troubleshooting-shards-capacity-issues.md +++ b/troubleshoot/elasticsearch/troubleshooting-shards-capacity-issues.md @@ -33,7 +33,7 @@ If you’re confident your changes won’t destabilize the cluster, you can temp 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). 
If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. @@ -243,7 +243,7 @@ If you’re confident your changes won’t destabilize the cluster, you can temp 2. On the **Hosted deployments** panel, click the name of your deployment. ::::{note} - If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case please contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). + If the name of your deployment is disabled your {{kib}} instances might be unhealthy, in which case contact [Elastic Support](https://support.elastic.co). If your deployment doesn’t include {{kib}}, all you need to do is [enable it first](../../deploy-manage/deploy/elastic-cloud/access-kibana.md). :::: 3. Open your deployment’s side navigation menu (placed under the Elastic logo in the upper left corner) and go to **Dev Tools > Console**. diff --git a/troubleshoot/ingest/elastic-serverless-forwarder.md b/troubleshoot/ingest/elastic-serverless-forwarder.md index b64811f768..901d2a7bf0 100644 --- a/troubleshoot/ingest/elastic-serverless-forwarder.md +++ b/troubleshoot/ingest/elastic-serverless-forwarder.md @@ -43,7 +43,7 @@ To help with debugging, you can increase the amount of logging detail by adding Version 1.6.0 introduces a new event ID format that prevents duplicate ID errors when a high volume of events is ingested to {{es}}. This new format combines a timestamp with data specific to the relevant AWS resource, extracted from the AWS Lambda event received by the forwarder. -The timestamp is used as a prefix for the ID, because identifiers that gradually increase over time generally result in better indexing performance in {{es}}, based on sorting order rather than completely random identifiers. For more information, please refer to [this Elastic blog on event-based data](https://www.elastic.co/blog/efficient-duplicate-prevention-for-event-based-data-in-elasticsearch). +The timestamp is used as a prefix for the ID, because identifiers that gradually increase over time generally result in better indexing performance in {{es}}, based on sorting order rather than completely random identifiers. For more information, refer to [this Elastic blog on event-based data](https://www.elastic.co/blog/efficient-duplicate-prevention-for-event-based-data-in-elasticsearch). ::::{note} If old events that are already published to {{es}} using a version of Elastic Serverless Forwarder before v1.6.0 are ingested again, they will be treated as new events and published to {{es}} as duplicates. diff --git a/troubleshoot/ingest/fleet/common-problems.md b/troubleshoot/ingest/fleet/common-problems.md index 8023dfbd25..2318a06237 100644 --- a/troubleshoot/ingest/fleet/common-problems.md +++ b/troubleshoot/ingest/fleet/common-problems.md @@ -8,7 +8,7 @@ applies_to: # Common problems [fleet-troubleshooting] -We have collected the most common known problems and listed them here. If your problem is not described here, please review the open issues in the following GitHub repositories: +We have collected the most common known problems and listed them here. 
If your problem is not described here, review the open issues in the following GitHub repositories: | Repository | To review or report issues about | | --- | --- | @@ -210,7 +210,7 @@ To fix this problem, add your CA certificate file path to the {{kib}} startup fi 1. To investigate the error, open your browser’s development console. 2. Select the **Network** tab, and refresh the page. - One of the requests to the {{fleet}} API will most likely have returned an error. If the error message doesn’t give you enough information to fix the problem, please contact us in the [discuss forum](https://discuss.elastic.co/). + One of the requests to the {{fleet}} API will most likely have returned an error. If the error message doesn’t give you enough information to fix the problem, contact us in the [discuss forum](https://discuss.elastic.co/). @@ -301,7 +301,7 @@ When creating an issue or sending a support forum communication, this section ca ``` ::::{note} - Both of the above commands are accessible via Windows or macOS with their OS-specific slight variation in how you call them. If needed, please refer to [*Install {{agent}}s*](/reference/fleet/install-elastic-agents.md) for examples of how to adjust them. + Both of the above commands are accessible via Windows or macOS with their OS-specific slight variation in how you call them. If needed, refer to [*Install {{agent}}s*](/reference/fleet/install-elastic-agents.md) for examples of how to adjust them. :::: @@ -347,12 +347,12 @@ The {{agent}} diagnostics bundle collects the following information: Note that the diagnostics bundle is intended for debugging purposes only, its structure may change between releases. ::::{important} -{{agent}} attempts to automatically redact credentials and API keys when creating diagnostics. Please review the contents of the archive before sharing to ensure that there are no credentials in plain text. +{{agent}} attempts to automatically redact credentials and API keys when creating diagnostics. Review the contents of the archive before sharing to ensure that there are no credentials in plain text. :::: ::::{important} -The ZIP archive containing diagnostics information will include the raw events of documents sent to the {{agent}} output. By default, it will log only the failing events as `warn`. When the `debug` logging level is enabled, all events are logged. Please review the contents of the archive before sharing to ensure that no sensitive information is included. +The ZIP archive containing diagnostics information will include the raw events of documents sent to the {{agent}} output. By default, it will log only the failing events as `warn`. When the `debug` logging level is enabled, all events are logged. Review the contents of the archive before sharing to ensure that no sensitive information is included. :::: diff --git a/troubleshoot/ingest/logstash.md b/troubleshoot/ingest/logstash.md index d2a2635eab..1acb8ce717 100644 --- a/troubleshoot/ingest/logstash.md +++ b/troubleshoot/ingest/logstash.md @@ -75,7 +75,7 @@ Try adding these values to the `jvm.options` file. **Notes:** * These settings allow Logstash to start without warnings. -* This workaround has been tested with simple pipelines. If you have experiences to share, please comment in the [issue](https://github.com/elastic/logstash/issues/10496). +* This workaround has been tested with simple pipelines. If you have experiences to share, comment in the [issue](https://github.com/elastic/logstash/issues/10496). 
### *Permission denied - NUL* errors on Windows [ts-windows-permission-denied-NUL] @@ -239,7 +239,7 @@ input {stdin{}} output {stdout{}} For more complex pipelines, the problem could be caused by a series of plugins in a specific order. Troubleshooting these pipelines usually requires trial and error. Start by systematically removing input and output plugins until you’re left with the minimum set that manifest the issue. -We want to expand this section to make it more helpful. If you have troubleshooting tips to share, please: +We want to expand this section to make it more helpful. If you have troubleshooting tips to share: * create an issue at [https://github.com/elastic/logstash/issues](https://github.com/elastic/logstash/issues), or * create a pull request with your proposed changes at [https://github.com/elastic/logstash](https://github.com/elastic/logstash). @@ -283,7 +283,7 @@ Without setting this flag, json log would contain objects like: } ``` -Please note the duplication of `message` field, while being technically valid json, it is not always parsed correctly. +Note the duplication of the `message` field; while technically valid json, it is not always parsed correctly. **Solution** In `config/logstash.yml` enable the strict json flag: diff --git a/troubleshoot/kibana/migration-failures.md b/troubleshoot/kibana/migration-failures.md index dcc4f4ce04..8ad4eb9de1 100644 --- a/troubleshoot/kibana/migration-failures.md +++ b/troubleshoot/kibana/migration-failures.md @@ -31,7 +31,7 @@ Saved objects that are corrupted through manual editing or integrations cause mi For example, you receive the following error message: ```sh -Unable to migrate the corrupt saved object document with _id: 'marketing_space:dashboard:e3c5fc71-ac71-4805-bcab-2bcc9cc93275'. To allow migrations to proceed, please delete this document from the [.kibana_7.12.0_001] index. +Unable to migrate the corrupt saved object document with _id: 'marketing_space:dashboard:e3c5fc71-ac71-4805-bcab-2bcc9cc93275'. To allow migrations to proceed, delete this document from the [.kibana_7.12.0_001] index. ``` To delete the documents that cause migrations to fail, take the following steps: @@ -177,7 +177,7 @@ If routing allocation is the issue, the `_cluster/allocation/explain` API will r Upgrade migrations fail because routing allocation is disabled or restricted (`cluster.routing.allocation.enable: none/primaries/new_primaries`), which causes {{kib}} to log errors such as: ```sh -Unable to complete saved object migrations for the [.kibana] index: [incompatible_cluster_routing_allocation] The elasticsearch cluster has cluster routing allocation incorrectly set for migrations to continue. To proceed, please remove the cluster routing allocation settings with PUT /_cluster/settings {"transient": {"cluster.routing.allocation.enable": null}, "persistent": {"cluster.routing.allocation.enable": null}} +Unable to complete saved object migrations for the [.kibana] index: [incompatible_cluster_routing_allocation] The elasticsearch cluster has cluster routing allocation incorrectly set for migrations to continue.
To proceed, remove the cluster routing allocation settings with PUT /_cluster/settings {"transient": {"cluster.routing.allocation.enable": null}, "persistent": {"cluster.routing.allocation.enable": null}} ``` To get around the issue, remove the transient and persisted routing allocation settings: diff --git a/troubleshoot/kibana/task-manager.md b/troubleshoot/kibana/task-manager.md index 723027779e..69249af45b 100644 --- a/troubleshoot/kibana/task-manager.md +++ b/troubleshoot/kibana/task-manager.md @@ -11,7 +11,7 @@ mapped_pages: Task Manager is used by a wide range of services in {{kib}}, such as [Alerting](../../deploy-manage/production-guidance/kibana-alerting-production-considerations.md), Actions, Reporting, and Telemetry. Unexpected behavior in these services might be a downstream issue originating in Task Manager. -This page describes how to resolve common problems you might encounter with Task Manager. If your problem isn’t described here, please review open issues in the following GitHub repositories: +This page describes how to resolve common problems you might encounter with Task Manager. If your problem isn’t described here, review open issues in the following GitHub repositories: * [{{kib}}](https://github.com/elastic/kibana/issues) ([Task Manager issues](https://github.com/elastic/kibana/issues?q=is%3Aopen+is%3Aissue+label%3A%22Feature%3ATask+Manager%22)) diff --git a/troubleshoot/observability/amazon-data-firehose.md b/troubleshoot/observability/amazon-data-firehose.md index 239b8a9e74..3f671b17e9 100644 --- a/troubleshoot/observability/amazon-data-firehose.md +++ b/troubleshoot/observability/amazon-data-firehose.md @@ -27,5 +27,5 @@ There are several facets to optimizing the underlying Elasticsearch performance, ## Support [aws-firehose-troubleshooting-support] -If you encounter further problems, please [contact us](/troubleshoot/index.md#contact-us). +If you encounter further problems, [contact us](/troubleshoot/index.md#contact-us). diff --git a/troubleshoot/observability/apm-agent-dotnet/apm-net-agent.md b/troubleshoot/observability/apm-agent-dotnet/apm-net-agent.md index d82ff142c3..1d74c723ee 100644 --- a/troubleshoot/observability/apm-agent-dotnet/apm-net-agent.md +++ b/troubleshoot/observability/apm-agent-dotnet/apm-net-agent.md @@ -12,10 +12,10 @@ applies_to: Use the information in this section to troubleshoot common problems and find answers for frequently asked questions. As a first step, ensure your stack is compatible with the Agent’s [supported technologies](apm-agent-dotnet://reference/supported-technologies.md). -Don’t worry if you can’t figure out what the problem is; we’re here to help. If you are an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). If not, post in the [APM discuss forum](https://discuss.elastic.co/c/apm). +Don’t worry if you can’t figure out what the problem is; we’re here to help. If you are an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). If not, post in the [APM discuss forum](https://discuss.elastic.co/c/apm). ::::{important} -**Please attach your debug logs** so that we can analyze the problem. Upload the **complete** logs to a service like [https://gist.github.com](https://gist.github.com). The logs should include everything from the application startup up until the first request has been executed. 
+**Attach your debug logs** so that we can analyze the problem. Upload the **complete** logs to a service like [https://gist.github.com](https://gist.github.com). The logs should include everything from the application startup up until the first request has been executed. :::: diff --git a/troubleshoot/observability/apm-agent-go/apm-go-agent.md b/troubleshoot/observability/apm-agent-go/apm-go-agent.md index 82bd452f85..05fe4bb633 100644 --- a/troubleshoot/observability/apm-agent-go/apm-go-agent.md +++ b/troubleshoot/observability/apm-agent-go/apm-go-agent.md @@ -12,10 +12,10 @@ applies_to: Is something not working as expected? Don’t worry if you can’t figure out what the problem is; we’re here to help! First, ensure your app is compatible with the agent’s [supported technologies](apm-agent-go://reference/supported-technologies.md). -If you’re an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). +If you’re an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). ::::{important} -**Please upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. Instructions for enabling logging are below. +**Upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. Instructions for enabling logging are below. :::: diff --git a/troubleshoot/observability/apm-agent-java/apm-java-agent.md b/troubleshoot/observability/apm-agent-java/apm-java-agent.md index b919204415..10efa1099f 100644 --- a/troubleshoot/observability/apm-agent-java/apm-java-agent.md +++ b/troubleshoot/observability/apm-agent-java/apm-java-agent.md @@ -12,12 +12,12 @@ applies_to: Something isn’t quite working as expected? Here are some guidelines how to find out what’s going wrong. -As a first step, please check if your stack is compatible with the currently [supported technologies](apm-agent-java://reference/set-up-apm-java-agent.md#supported-technologies). +As a first step, check if your stack is compatible with the currently [supported technologies](apm-agent-java://reference/set-up-apm-java-agent.md#supported-technologies). Don’t worry if you can’t figure out what the problem is. Open a topic in the [APM discuss forum](https://discuss.elastic.co/c/apm) and we will help you out. ::::{important} -If you do so, **please attach your debug logs** so that we can analyze the problem. Upload the **complete** logs to a service like [https://gist.github.com](https://gist.github.com). The logs should include everything from the application startup up until the first request has been executed. In addition to agent and application logs, look for `[elastic-apm-agent]` entries in all of your service’s std out and std error logs, where we sometimes print useful information when logging is unavailable. +If you do so, **attach your debug logs** so that we can analyze the problem. 
Upload the **complete** logs to a service like [https://gist.github.com](https://gist.github.com). The logs should include everything from the application startup up until the first request has been executed. In addition to agent and application logs, look for `[elastic-apm-agent]` entries in all of your service’s std out and std error logs, where we sometimes print useful information when logging is unavailable. :::: @@ -53,7 +53,7 @@ Set the log level to `DEBUG` or even `TRACE` to get more information about the b * All data sent to apm-server is included in JSON format -Please always post the whole content of your log files when asking for help. Use the [procedure](#trouble-shooting-logging-procedure) to ensure consistent logs when reporting potential issues. +Always post the whole content of your log files when asking for help. Use the [procedure](#trouble-shooting-logging-procedure) to ensure consistent logs when reporting potential issues. When the agent starts up, you should see logs similar to these: @@ -197,7 +197,7 @@ There are two reasons why this might happen: Requests reach a servlet : The Agent has a *pre-filter* heuristic to only consider classes whose names end with *Servlet*. This heuristic can be disabled by setting the internal configuration `enable_type_matching_name_pre_filtering=false`. - Please note that this has an impact on all plugins. A small increase of overhead during application startup time is expected. + Note that this has an impact on all plugins. A small increase of overhead during application startup time is expected. Requests do not reach a servlet @@ -260,12 +260,12 @@ Known issues: * When [`profiling_inferred_spans_enabled` ([1.15.0] experimental)](apm-agent-java://reference/config-profiling.md#config-profiling-inferred-spans-enabled) is set to `true`, it uses a native library that collects low-level information from the JVM. All known issues so far had been fixed. Try to disable it if you think the crash may be related. We continuously upgrade to the latest async profiler version, so upgrading your agent to the latest version may already contain a fix. -Whenever you encounter a JVM crash, please report through [our forum](https://discuss.elastic.co/c/observability/apm/58) or by opening an issue on our [GitHub repository](https://github.com/elastic/apm-agent-java). Look for the crash log (e.g. an `hs_err_pid.log`) and provide it when reporting, as well as all factors describing you setup and scenario. +Whenever you encounter a JVM crash, report it through [our forum](https://discuss.elastic.co/c/observability/apm/58) or by opening an issue on our [GitHub repository](https://github.com/elastic/apm-agent-java). Look for the crash log (e.g. an `hs_err_pid.log`) and provide it when reporting, as well as all factors describing your setup and scenario. ### JVM Hangs [trouble-shooting-jvm-hangs] -If your JVM gets hang when attaching the Java agent, please create a thread dump (e.g. through `jstack`) and report through [our forum](https://discuss.elastic.co/c/observability/apm/58) or by opening an issue on our [GitHub repository](https://github.com/elastic/apm-agent-java). +If your JVM hangs when attaching the Java agent, create a thread dump (e.g. through `jstack`) and report it through [our forum](https://discuss.elastic.co/c/observability/apm/58) or by opening an issue on our [GitHub repository](https://github.com/elastic/apm-agent-java).
### Custom Java runtimes using `jlink` [trouble-shooting-jlink] diff --git a/troubleshoot/observability/apm-agent-nodejs/apm-nodejs-agent.md b/troubleshoot/observability/apm-agent-nodejs/apm-nodejs-agent.md index c98921db88..1236dcac13 100644 --- a/troubleshoot/observability/apm-agent-nodejs/apm-nodejs-agent.md +++ b/troubleshoot/observability/apm-agent-nodejs/apm-nodejs-agent.md @@ -12,10 +12,10 @@ applies_to: Is something not working as expected? Don’t worry if you can’t figure out what the problem is; we’re here to help! As a first step, ensure your app is compatible with the agent’s [supported technologies](apm-agent-nodejs://reference/supported-technologies.md). -If you’re an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). +If you’re an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). ::::{important} -**Please upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. See [Debug mode](#debug-mode) for more information. +**Upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. See [Debug mode](#debug-mode) for more information. :::: @@ -42,7 +42,7 @@ For example: ELASTIC_APM_LOG_LEVEL=trace ELASTIC_APM_LOGGER=false node app.js | tee -a apm-debug.log ``` -If you are capturing debugging output for Elastic support, for help on the Elastic forums, or for a GitHub issue, **please upload the complete debug output** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. +If you are capturing debugging output for Elastic support, for help on the Elastic forums, or for a GitHub issue, **upload the complete debug output** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. ## Common problems [common-problems] diff --git a/troubleshoot/observability/apm-agent-php/apm-php-agent.md b/troubleshoot/observability/apm-agent-php/apm-php-agent.md index 8fb54b5de3..e1875c95df 100644 --- a/troubleshoot/observability/apm-agent-php/apm-php-agent.md +++ b/troubleshoot/observability/apm-agent-php/apm-php-agent.md @@ -12,10 +12,10 @@ applies_to: Is something not working as expected? Don’t worry if you can’t figure out what the problem is; we’re here to help! As a first step, ensure your app is compatible with the agent’s [supported technologies](apm-agent-php://reference/supported-technologies.md). -If you’re an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). +If you’re an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). 
::::{important} -**Please upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. +**Upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. :::: diff --git a/troubleshoot/observability/apm-agent-rum-js/apm-real-user-monitoring-javascript-agent.md b/troubleshoot/observability/apm-agent-rum-js/apm-real-user-monitoring-javascript-agent.md index 9577110a3c..01e498bd3c 100644 --- a/troubleshoot/observability/apm-agent-rum-js/apm-real-user-monitoring-javascript-agent.md +++ b/troubleshoot/observability/apm-agent-rum-js/apm-real-user-monitoring-javascript-agent.md @@ -87,7 +87,7 @@ To disable the agent, set [`active`](apm-agent-rum-js://reference/configuration. ## Get in touch [get-in-touch] -If you have any questions, please create a new topic in the [Elastic APM discuss forum](https://discuss.elastic.co/c/apm). +If you have any questions, create a new topic in the [Elastic APM discuss forum](https://discuss.elastic.co/c/apm). -For bug reports and feature requests, please [create an issue](https://github.com/elastic/apm-agent-rum-js/issues/new) on our [github repo](https://github.com/elastic/apm-agent-rum-js) and include as much information as possible. See [Debugging](#debugging) for how to gather debugging information. +For bug reports and feature requests, [create an issue](https://github.com/elastic/apm-agent-rum-js/issues/new) on our [github repo](https://github.com/elastic/apm-agent-rum-js) and include as much information as possible. See [Debugging](#debugging) for how to gather debugging information. diff --git a/troubleshoot/observability/apm-agent-swift/apm-ios-agent.md b/troubleshoot/observability/apm-agent-swift/apm-ios-agent.md index dfe482cfd3..ebd13460e2 100644 --- a/troubleshoot/observability/apm-agent-swift/apm-ios-agent.md +++ b/troubleshoot/observability/apm-agent-swift/apm-ios-agent.md @@ -10,10 +10,10 @@ applies_to: Is something not working as expected? Don’t worry if you can’t figure out what the problem is; we’re here to help! As a first step, ensure your app is compatible with the agent’s [supported technologies](apm-agent-ios://reference/supported-technologies.md). -If you’re an existing Elastic customer with a support contract, please create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). +If you’re an existing Elastic customer with a support contract, create a ticket in the [Elastic Support portal](https://support.elastic.co/customers/s/login/). Other users can post in the [APM discuss forum](https://discuss.elastic.co/c/apm). ::::{important} -**Please upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. +**Upload your complete debug logs** to a service like [GitHub Gist](https://gist.github.com) so that we can analyze the problem. Logs should include everything from when the application starts up until the first request executes. 
:::: diff --git a/troubleshoot/observability/apm/_agent_is_not_instrumenting_code.md b/troubleshoot/observability/apm/_agent_is_not_instrumenting_code.md index 8720554743..dad639228f 100644 --- a/troubleshoot/observability/apm/_agent_is_not_instrumenting_code.md +++ b/troubleshoot/observability/apm/_agent_is_not_instrumenting_code.md @@ -11,7 +11,7 @@ applies_to: ## `open_basedir` PHP configuration option [_open_basedir_php_configuration_option] -If you see a similar entry in the agent log, this indicates an incorrect open_basedir configuration. For more details please see [the corresponding Limitations sub-section](apm-agent-php://reference/set-up-apm-php-agent.md#limitation-open_basedir). +If you see a similar entry in the agent log, this indicates an incorrect open_basedir configuration. For more details see [the corresponding Limitations sub-section](apm-agent-php://reference/set-up-apm-php-agent.md#limitation-open_basedir). ``` [Elastic APM PHP Tracer] 2023-08-23 14:38:12.223397+02:00 [PID: 268995] [TID: 268995] [WARNING] [Lifecycle] [lifecycle.cpp:558] [elasticApmModuleInit] Elastic Agent bootstrap file (/home/paplo/sources/apm-agent-php/agent/php/bootstrap_php_part.php) is located outside of paths allowed by open_basedir ini setting. Read more details here https://www.elastic.co/guide/en/apm/agent/php/current/setup.html diff --git a/troubleshoot/observability/apm/_collection_of_diagnostic_information.md b/troubleshoot/observability/apm/_collection_of_diagnostic_information.md index 016c2b4fbb..94ea3fbc62 100644 --- a/troubleshoot/observability/apm/_collection_of_diagnostic_information.md +++ b/troubleshoot/observability/apm/_collection_of_diagnostic_information.md @@ -34,7 +34,7 @@ If there are multiple PHP processes in your system, we allow you to specify dire * `%t` - In this place, the agent will substitute the UNIX timestamp. ::::{important} -After setting the path, remember to **fully restart the process** for which you are collecting diagnostic information. This may vary depending on the context, such as PHP, PHP-FPM, Apache, or PHP-CGI. Diagnostic information will be recorded after the first HTTP request is made or at the beginning of script execution for PHP-CLI.

Please also be aware that the information contained in the output file may include sensitive data, such as passwords, security tokens or environment variables from your system. Make sure to review the data and mask sensitive information before sharing the file publicly.

After collecting diagnostic information, remember to disable this feature and restore the previous configuration in php.ini or the environment variable. +After setting the path, remember to **fully restart the process** for which you are collecting diagnostic information. This may vary depending on the context, such as PHP, PHP-FPM, Apache, or PHP-CGI. Diagnostic information will be recorded after the first HTTP request is made or at the beginning of script execution for PHP-CLI.

Be aware that the information contained in the output file may include sensitive data, such as passwords, security tokens or environment variables from your system. Make sure to review the data and mask sensitive information before sharing the file publicly.

After collecting diagnostic information, remember to disable this feature and restore the previous configuration in php.ini or the environment variable. :::: diff --git a/troubleshoot/observability/apm/common-problems.md b/troubleshoot/observability/apm/common-problems.md index d966fc87c4..7327a90bd4 100644 --- a/troubleshoot/observability/apm/common-problems.md +++ b/troubleshoot/observability/apm/common-problems.md @@ -185,7 +185,7 @@ In the agent logs, you won’t see a sign of failures as the APM server asynchro stack: all ``` -Tail-based sampling requires minimal memory to run, and there should not be a noticeable increase in RSS memory usage. However, since tail-based sampling writes data to disk, it is possible to see a significant increase in OS page cache memory usage due to disk IO. If you see a drop in throughput and excessive disk activity after enabling tail-based sampling, please ensure that there is enough memory headroom in the system for OS page cache to perform disk IO efficiently. +Tail-based sampling requires minimal memory to run, and there should not be a noticeable increase in RSS memory usage. However, since tail-based sampling writes data to disk, it is possible to see a significant increase in OS page cache memory usage due to disk IO. If you see a drop in throughput and excessive disk activity after enabling tail-based sampling, ensure that there is enough memory headroom in the system for OS page cache to perform disk IO efficiently. ## Too many unique transaction names [troubleshooting-too-many-transactions] @@ -217,7 +217,7 @@ If you feel like you’d be losing valuable information by following this naming After ensuring you’ve correctly named your transactions, you might still see errors in the Applications UI related to transaction group limit reached: -`The number of transaction groups has been reached. Current APM server capacity for handling unique transaction groups has been reached. There are at least X transactions missing in this list. Please decrease the number of transaction groups in your service or increase the memory allocated to APM server.` +`The number of transaction groups has been reached. Current APM server capacity for handling unique transaction groups has been reached. There are at least X transactions missing in this list. Decrease the number of transaction groups in your service or increase the memory allocated to APM server.` You will see this warning if an agent is creating too many transaction groups. This could indicate incorrect instrumentation which will have to be fixed in your application. Alternatively you can increase the memory of the APM server. diff --git a/troubleshoot/observability/troubleshoot-logs.md b/troubleshoot/observability/troubleshoot-logs.md index 883c7af66a..849d327919 100644 --- a/troubleshoot/observability/troubleshoot-logs.md +++ b/troubleshoot/observability/troubleshoot-logs.md @@ -50,7 +50,7 @@ User does not have permissions to create API key. Required cluster privileges are [`monitor`, `manage_own_api_key`] and required index privileges are [`auto_configure`, `create_doc`] for -indices [`logs-*-*`, `metrics-*-*`], please add all required privileges +indices [`logs-*-*`, `metrics-*-*`], add all required privileges to the role of the authenticated user. 
``` diff --git a/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md b/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md index 579b8ae3ca..f4337b38aa 100644 --- a/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md +++ b/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md @@ -97,7 +97,7 @@ If running this command outputs error-level logs, the following are possible cau * The Universal Profiling Agent is incompatible with the {{stack}} version. In this case, the following message is logged: ```txt - rpc error: code = FailedPrecondition desc= HostAgent version is unsupported, please upgrade to the latest version + rpc error: code = FailedPrecondition desc= HostAgent version is unsupported, upgrade to the latest version ``` Follow the Universal Profiling Agent deployment instructions shown in Kibana which will always be correct for the {{stack}} version that you are using. @@ -105,7 +105,7 @@ If running this command outputs error-level logs, the following are possible cau * You are using a Universal Profling Agent from a newer {{stack}} version, configured to connect to an older {{stack}} version cluster. In this case, the following message is logged: ```txt - rpc error: code = FailedPrecondition desc= Backend is incompatible with HostAgent, please check your configuration + rpc error: code = FailedPrecondition desc= Backend is incompatible with HostAgent, check your configuration ``` Follow the Universal Profiling Agent deployment instructions shown in Kibana which will always be correct for the {{stack}} version that you are using. diff --git a/troubleshoot/security/detection-rules.md b/troubleshoot/security/detection-rules.md index 6433466db2..72d9f5c971 100644 --- a/troubleshoot/security/detection-rules.md +++ b/troubleshoot/security/detection-rules.md @@ -159,7 +159,7 @@ It’s recommended to set the `Additional look-back time` to at least 1 minute. {{elastic-sec}} prevents duplication. Any duplicate alerts that are discovered during the `Additional look-back time` are *not* created. ::::{note} -If the rule that experiences gaps is an indicator match rule, see [how to tune indicator match rules](../../solutions/security/detect-and-alert/tune-detection-rules.md#tune-indicator-rules). Also please note that {{elastic-sec}} provides [limited support for indicator match rules](../../solutions/security/detect-and-alert.md#support-indicator-rules). +If the rule that experiences gaps is an indicator match rule, see [how to tune indicator match rules](../../solutions/security/detect-and-alert/tune-detection-rules.md#tune-indicator-rules). Also note that {{elastic-sec}} provides [limited support for indicator match rules](../../solutions/security/detect-and-alert.md#support-indicator-rules). 
:::: From 2d55c068a40bb71dc6a5569b92da8ac13664fcfc Mon Sep 17 00:00:00 2001 From: Fabrizio Ferri Benedetti Date: Thu, 10 Apr 2025 18:08:15 +0200 Subject: [PATCH 2/4] Restore code pleases --- .../deploy/elastic-cloud/azure-native-isv-service.md | 4 ++-- deploy-manage/security/traffic-filtering.md | 2 +- explore-analyze/alerts-cases/watcher/actions-pagerduty.md | 4 ++-- solutions/search/semantic-search/semantic-search-inference.md | 2 +- troubleshoot/kibana/migration-failures.md | 4 ++-- troubleshoot/observability/apm/common-problems.md | 2 +- troubleshoot/observability/troubleshoot-logs.md | 2 +- .../troubleshoot-your-universal-profiling-agent-deployment.md | 4 ++-- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md index 29b7fbfec9..b96cc249d0 100644 --- a/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md +++ b/deploy-manage/deploy/elastic-cloud/azure-native-isv-service.md @@ -467,11 +467,11 @@ $$$azure-integration-deployment-failed-traffic-filter$$$My {{ecloud}} deployment ```txt { "code": "DeploymentFailed", - "message": "At least one resource deployment operation failed. List deployment operations for details. See https://aka.ms/DeployOperations for usage details.", + "message": "At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.", "details": [ { "code": "500", - "message": "An error occurred during deployment creation. Try again. If the problem persists, contact support@elastic.co." + "message": "An error occurred during deployment creation. Please try again. If the problem persists, please contact support@elastic.co." } ] ``` diff --git a/deploy-manage/security/traffic-filtering.md b/deploy-manage/security/traffic-filtering.md index 70993a5a3a..472d95f8cb 100644 --- a/deploy-manage/security/traffic-filtering.md +++ b/deploy-manage/security/traffic-filtering.md @@ -116,7 +116,7 @@ Requests rejected by traffic filter have status code `403 Forbidden` and one of ``` ```json -{"ok":false,"message":"Forbidden due to traffic filtering. See the Elastic documentation on Traffic Filtering for more information."} +{"ok":false,"message":"Forbidden due to traffic filtering. Please see the Elastic documentation on Traffic Filtering for more information."} ``` Additionally, traffic filter rejections are logged in ECE proxy logs as `status_reason: BLOCKED_BY_IP_FILTER`. Proxy logs also provide client IP in `client_ip` field. \ No newline at end of file diff --git a/explore-analyze/alerts-cases/watcher/actions-pagerduty.md b/explore-analyze/alerts-cases/watcher/actions-pagerduty.md index 2577e4f57a..7cae701c35 100644 --- a/explore-analyze/alerts-cases/watcher/actions-pagerduty.md +++ b/explore-analyze/alerts-cases/watcher/actions-pagerduty.md @@ -23,7 +23,7 @@ The following snippet shows a simple PagerDuty action definition: "transform" : { ... }, "throttle_period" : "5m", "pagerduty" : { - "description" : "Main system down, check!" <1> + "description" : "Main system down, please check!" <1> } } } @@ -41,7 +41,7 @@ To give the PagerDuty incident some more context, you can attach the payload as "throttle_period" : "5m", "pagerduty" : { "account" : "team1", - "description" : "Main system down, check! Happened at {{ctx.execution_time}}", + "description" : "Main system down, please check! 
Happened at {{ctx.execution_time}}", "attach_payload" : true, "client" : "/foo/bar/{{ctx.watch_id}}", "client_url" : "http://www.example.org/", diff --git a/solutions/search/semantic-search/semantic-search-inference.md b/solutions/search/semantic-search/semantic-search-inference.md index 29e2a45911..5e435893ff 100644 --- a/solutions/search/semantic-search/semantic-search-inference.md +++ b/solutions/search/semantic-search/semantic-search-inference.md @@ -1458,7 +1458,7 @@ As a result, you receive the top 10 documents that are closest in meaning to the "_score": 0.86815524, "_source": { "id": 3041038, - "content": "For example, the cost of the fuel could be 96.9, the amount could be 10 pounds, and the distance covered could be 80 miles. To convert between Litres per 100KM and Miles Per Gallon, provide a value and click on the required button.o calculate how much fuel you'll need for a given journey, provide the distance in miles you will be covering on your journey, and the estimated MPG of your vehicle. To work out what MPG you are really getting, provide the cost of the fuel, how much you spent on the fuel, and how far it took you." + "content": "For example, the cost of the fuel could be 96.9, the amount could be 10 pounds, and the distance covered could be 80 miles. To convert between Litres per 100KM and Miles Per Gallon, please provide a value and click on the required button.o calculate how much fuel you'll need for a given journey, please provide the distance in miles you will be covering on your journey, and the estimated MPG of your vehicle. To work out what MPG you are really getting, please provide the cost of the fuel, how much you spent on the fuel, and how far it took you." } }, { diff --git a/troubleshoot/kibana/migration-failures.md b/troubleshoot/kibana/migration-failures.md index 8ad4eb9de1..dcc4f4ce04 100644 --- a/troubleshoot/kibana/migration-failures.md +++ b/troubleshoot/kibana/migration-failures.md @@ -31,7 +31,7 @@ Saved objects that are corrupted through manual editing or integrations cause mi For example, you receive the following error message: ```sh -Unable to migrate the corrupt saved object document with _id: 'marketing_space:dashboard:e3c5fc71-ac71-4805-bcab-2bcc9cc93275'. To allow migrations to proceed, delete this document from the [.kibana_7.12.0_001] index. +Unable to migrate the corrupt saved object document with _id: 'marketing_space:dashboard:e3c5fc71-ac71-4805-bcab-2bcc9cc93275'. To allow migrations to proceed, please delete this document from the [.kibana_7.12.0_001] index. ``` To delete the documents that cause migrations to fail, take the following steps: @@ -177,7 +177,7 @@ If routing allocation is the issue, the `_cluster/allocation/explain` API will r Upgrade migrations fail because routing allocation is disabled or restricted (`cluster.routing.allocation.enable: none/primaries/new_primaries`), which causes {{kib}} to log errors such as: ```sh -Unable to complete saved object migrations for the [.kibana] index: [incompatible_cluster_routing_allocation] The elasticsearch cluster has cluster routing allocation incorrectly set for migrations to continue. 
To proceed, remove the cluster routing allocation settings with PUT /_cluster/settings {"transient": {"cluster.routing.allocation.enable": null}, "persistent": {"cluster.routing.allocation.enable": null}} +Unable to complete saved object migrations for the [.kibana] index: [incompatible_cluster_routing_allocation] The elasticsearch cluster has cluster routing allocation incorrectly set for migrations to continue. To proceed, please remove the cluster routing allocation settings with PUT /_cluster/settings {"transient": {"cluster.routing.allocation.enable": null}, "persistent": {"cluster.routing.allocation.enable": null}} ``` To get around the issue, remove the transient and persisted routing allocation settings: diff --git a/troubleshoot/observability/apm/common-problems.md b/troubleshoot/observability/apm/common-problems.md index 7327a90bd4..ec8e1eb94d 100644 --- a/troubleshoot/observability/apm/common-problems.md +++ b/troubleshoot/observability/apm/common-problems.md @@ -217,7 +217,7 @@ If you feel like you’d be losing valuable information by following this naming After ensuring you’ve correctly named your transactions, you might still see errors in the Applications UI related to transaction group limit reached: -`The number of transaction groups has been reached. Current APM server capacity for handling unique transaction groups has been reached. There are at least X transactions missing in this list. Decrease the number of transaction groups in your service or increase the memory allocated to APM server.` +`The number of transaction groups has been reached. Current APM server capacity for handling unique transaction groups has been reached. There are at least X transactions missing in this list. Please decrease the number of transaction groups in your service or increase the memory allocated to APM server.` You will see this warning if an agent is creating too many transaction groups. This could indicate incorrect instrumentation which will have to be fixed in your application. Alternatively you can increase the memory of the APM server. diff --git a/troubleshoot/observability/troubleshoot-logs.md b/troubleshoot/observability/troubleshoot-logs.md index 849d327919..883c7af66a 100644 --- a/troubleshoot/observability/troubleshoot-logs.md +++ b/troubleshoot/observability/troubleshoot-logs.md @@ -50,7 +50,7 @@ User does not have permissions to create API key. Required cluster privileges are [`monitor`, `manage_own_api_key`] and required index privileges are [`auto_configure`, `create_doc`] for -indices [`logs-*-*`, `metrics-*-*`], add all required privileges +indices [`logs-*-*`, `metrics-*-*`], please add all required privileges to the role of the authenticated user. ``` diff --git a/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md b/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md index f4337b38aa..579b8ae3ca 100644 --- a/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md +++ b/troubleshoot/observability/troubleshoot-your-universal-profiling-agent-deployment.md @@ -97,7 +97,7 @@ If running this command outputs error-level logs, the following are possible cau * The Universal Profiling Agent is incompatible with the {{stack}} version. 
In this case, the following message is logged: ```txt - rpc error: code = FailedPrecondition desc= HostAgent version is unsupported, upgrade to the latest version + rpc error: code = FailedPrecondition desc= HostAgent version is unsupported, please upgrade to the latest version ``` Follow the Universal Profiling Agent deployment instructions shown in Kibana which will always be correct for the {{stack}} version that you are using. @@ -105,7 +105,7 @@ If running this command outputs error-level logs, the following are possible cau * You are using a Universal Profling Agent from a newer {{stack}} version, configured to connect to an older {{stack}} version cluster. In this case, the following message is logged: ```txt - rpc error: code = FailedPrecondition desc= Backend is incompatible with HostAgent, check your configuration + rpc error: code = FailedPrecondition desc= Backend is incompatible with HostAgent, please check your configuration ``` Follow the Universal Profiling Agent deployment instructions shown in Kibana which will always be correct for the {{stack}} version that you are using. From ecfa344e95e5342ffe3be20b2585086393d1d4b5 Mon Sep 17 00:00:00 2001 From: Fabrizio Ferri-Benedetti Date: Thu, 10 Apr 2025 18:38:53 +0200 Subject: [PATCH 3/4] Update deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md Co-authored-by: Janeen Mikell Roberts <57149392+jmikell821@users.noreply.github.com> --- .../deploy/elastic-cloud/upload-custom-plugins-bundles.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md b/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md index c4e1757959..a3907cdd11 100644 --- a/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md +++ b/deploy-manage/deploy/elastic-cloud/upload-custom-plugins-bundles.md @@ -248,4 +248,4 @@ https://api.elastic-cloud.com/api/v1/deployments/extensions \ }' ``` -See [Extensions API reference](https://www.elastic.co/docs/api/doc/cloud/group/endpoint-extensions) for the complete set of HTTP methods and payloads. +Refer to [Extensions API reference](https://www.elastic.co/docs/api/doc/cloud/group/endpoint-extensions) for the complete set of HTTP methods and payloads. From 1cf00309251fc1646f14c4104f74622e6e18712e Mon Sep 17 00:00:00 2001 From: Fabrizio Ferri-Benedetti Date: Thu, 10 Apr 2025 18:38:58 +0200 Subject: [PATCH 4/4] Update deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md Co-authored-by: Janeen Mikell Roberts <57149392+jmikell821@users.noreply.github.com> --- .../deploy/self-managed/install-elasticsearch-with-rpm.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md index 91bfd505cb..0d95ebc2c4 100644 --- a/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md +++ b/deploy-manage/deploy/self-managed/install-elasticsearch-with-rpm.md @@ -18,7 +18,7 @@ applies_to: The RPM package for {{es}} can be [downloaded from our website](#install-rpm) or from our [RPM repository](#rpm-repo). It can be used to install {{es}} on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. ::::{note} -RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. See [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. 
+RPM install is not supported on distributions with old versions of RPM, such as SLES 11 and CentOS 5. Refer to [Install {{es}} from archive on Linux or MacOS](install-elasticsearch-from-archive-on-linux-macos.md) instead. :::: :::{include} _snippets/trial.md