diff --git a/docset.yml b/docset.yml index f1b93d6c31..76359d5414 100644 --- a/docset.yml +++ b/docset.yml @@ -6,6 +6,7 @@ features: exclude: - 'README.md' + - 'raw-migrated-files/**' cross_links: - apm-agent-android @@ -66,7 +67,6 @@ toc: - toc: release-notes - toc: reference - toc: extend - - toc: raw-migrated-files - hidden: 404.md subs: diff --git a/raw-migrated-files/apm-agent-android/apm-agent-android/index.md b/raw-migrated-files/apm-agent-android/apm-agent-android/index.md deleted file mode 100644 index 89dd87f51c..0000000000 --- a/raw-migrated-files/apm-agent-android/apm-agent-android/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# APM Android agent - -Migrated files from the APM Android agent book. \ No newline at end of file diff --git a/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md b/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md deleted file mode 100644 index 13525e270b..0000000000 --- a/raw-migrated-files/apm-agent-android/apm-agent-android/release-notes.md +++ /dev/null @@ -1,10 +0,0 @@ -# Release notes [release-notes] - -::::{warning} -This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. -:::: - - -* [Android agent version 0.x](apm-agent-android://release-notes/index.md) - - diff --git a/raw-migrated-files/apm-agent-ios/apm-agent-swift/index.md b/raw-migrated-files/apm-agent-ios/apm-agent-swift/index.md deleted file mode 100644 index 8f6a0aeb94..0000000000 --- a/raw-migrated-files/apm-agent-ios/apm-agent-swift/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# APM iOS agent - -Migrated files from the APM iOS agent book. 
\ No newline at end of file diff --git a/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.0.md b/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.0.md deleted file mode 100644 index fc01279686..0000000000 --- a/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.0.md +++ /dev/null @@ -1,8 +0,0 @@ -# v1.0.0 [release-notes-v1.0.0] - -## Features [_features_3] - -* Added network status to all signals [#202] -* Added session.id to crash reports [#195] - - diff --git a/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.1.md b/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.1.md deleted file mode 100644 index d0594c67f1..0000000000 --- a/raw-migrated-files/apm-agent-ios/apm-agent-swift/release-notes-v1.0.1.md +++ /dev/null @@ -1,7 +0,0 @@ -# v1.0.1 [release-notes-v1.0.1] - -## Fixes [_fixes] - -* fixed memory leaks related to NTP usage [#212] - - diff --git a/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/index.md b/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/index.md deleted file mode 100644 index 6aa102da51..0000000000 --- a/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# APM Ruby agent - -Migrated files from the APM Ruby agent book. \ No newline at end of file diff --git a/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/release-notes-4.x.md b/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/release-notes-4.x.md deleted file mode 100644 index d4dfa49863..0000000000 --- a/raw-migrated-files/apm-agent-ruby/apm-agent-ruby/release-notes-4.x.md +++ /dev/null @@ -1,216 +0,0 @@ -# Ruby Agent version 4.x [release-notes-4.x] - -[[release-notes-4.7.3] ==== 4.7.3 - - -#### Fixed [_fixed] - -* Address a bug where if `capture_headers` is false, `ContextBuilder` will raise `"undefined method 'has_key?' 
for nil:NilClass"` [#1449](https://github.com/elastic/apm-agent-ruby/pull/1449) - -[[release-notes-4.7.2] ==== 4.7.2 - - -#### Fixed [_fixed_2] - -* Address machineType not being returned in GCP metadata [#1435](https://github.com/elastic/apm-agent-ruby/pull/1435) - -[[release-notes-4.7.1] ==== 4.7.1 - - -#### Fixed [_fixed_3] - -* Skip capturing cookie header when it’s set separately [#1405](https://github.com/elastic/apm-agent-ruby/pull/1405) -* Changes/fixes to metadata.cloud.* fields collected for GCP [#1415](https://github.com/elastic/apm-agent-ruby/pull/1415) -* Pin version of bigdecimal for ruby 2.4 [#1417](https://github.com/elastic/apm-agent-ruby/pull/1417) -* Use response method on Faraday error for older versions of the library [#1419](https://github.com/elastic/apm-agent-ruby/pull/1419) -* Fix ActionDispatchSpy#render_exception for Rails 7.1 [#1423](https://github.com/elastic/apm-agent-ruby/pull/1423) -* Use graphql < 2.1 when Ruby < 2.7 [#1425](https://github.com/elastic/apm-agent-ruby/pull/1425) -* Guard against various Faraday exception response formats [#1428](https://github.com/elastic/apm-agent-ruby/pull/1428) - -## 4.7.0 [release-notes-4.7.0] - - -#### Fixed [_fixed_4] - -* Handle Faraday response being nil [#1382](https://github.com/elastic/apm-agent-ruby/pull/1382) -* Fix error with invalid %-encoding [#1400](https://github.com/elastic/apm-agent-ruby/pull/1400) - - -#### Added [_added] - -* Add keyword args for span_method helper [#1395](https://github.com/elastic/apm-agent-ruby/pull/1395) - - -## 4.6.2 [release-notes-4.6.2] - - -#### Fixed [_fixed_5] - -* Fix Faraday::RackBuilder::StackLocked [#1371](https://github.com/elastic/apm-agent-ruby/pull/1371) - - -## 4.6.1 [release-notes-4.6.1] - - -#### Fixed [_fixed_6] - -* Fix growing number of open file descriptors when HTTP request to APM is never sent [#1351](https://github.com/elastic/apm-agent-ruby/pull/1351) -* Fix setting span http status code when Faraday Middleware is used 
[#1368](https://github.com/elastic/apm-agent-ruby/pull/1368) -* Handle whitespace when splitting tracestate entries [#1353](https://github.com/elastic/apm-agent-ruby/pull/1353) - - -## 4.6.0 [release-notes-4.6.0] - - -#### Added [_added_2] - -* Added transaction_name to reported error to allow grouping by transaction name [#1267](https://github.com/elastic/apm-agent-ruby/pull/1267) -* Added ability to query server for version (useful in the future) [#1278](https://github.com/elastic/apm-agent-ruby/pull/1278) -* Added instrumentation for [https://github.com/zendesk/racecar/](https://github.com/zendesk/racecar/) Racecar Kafka library [#1284](https://github.com/elastic/apm-agent-ruby/pull/1284) - -### Changed [_changed] - -* Expanded filtering to sanitize any key that contains the string *auth* [#1266](https://github.com/elastic/apm-agent-ruby/pull/1266) -* Rename `log_ecs_formatting` option to `log_ecs_reformatting`, deprecate old option name [#1248](https://github.com/elastic/apm-agent-ruby/pull/1248) -* When the configuration value for `log_path` is set, override the `logger` to point to that path instead of using e.g. 
Rails logger [#1247](https://github.com/elastic/apm-agent-ruby/pull/1247) -* Only send tracestate header for distributed tracing when it has content [#1277](https://github.com/elastic/apm-agent-ruby/pull/1277) -* Use the hostname as the Kubernetes pod name in the Container Info metadata if the pod id is parsed from cgroup [#1314](https://github.com/elastic/apm-agent-ruby/pull/1314) - -#### Fixed [_fixed_7] - -* Small change to Sidekiq tests to handle new configuration passing method [#1283](https://github.com/elastic/apm-agent-ruby/pull/1283) -* Set transaction sample rate to 0 when it’s unsampled [#1339](https://github.com/elastic/apm-agent-ruby/pull/1339) -* Don’t send unsampled transactions to APM server >= 8.0 [#1341](https://github.com/elastic/apm-agent-ruby/pull/1341) - - - - -## 4.5.1 [release-notes-4.5.1] - - -#### Changed [_changed_2] - -* Update elasticsearch spy to use new transport gem name [#1257](https://github.com/elastic/apm-agent-ruby/pull/1257) -* Standardize placeholder for phone numbers as [PHONENUMBER] per [https://github.com/elastic/apm/blob/main/specs/agents/tracing-instrumentation-aws.md](https://github.com/elastic/apm/blob/main/specs/agents/tracing-instrumentation-aws.md) [#1246](https://github.com/elastic/apm-agent-ruby/pull/1246) - -### Fixed [_fixed_8] - -* Fixed dependencies to allow CI to build successfully [#1259](https://github.com/elastic/apm-agent-ruby/pull/1259) -* Fixed warnings related to TimeTask timeouts [#1255](https://github.com/elastic/apm-agent-ruby/pull/1255) - - - -## 4.5.0 [release-notes-4.5.0] - - -#### Changed [_changed_3] - -* Stop collecting the field `http.request.socket.encrypted` [#1181](https://github.com/elastic/apm-agent-ruby/pull/1181) - - -#### Fixed [_fixed_9] - -* Fixed MongoDB spy thread safety [#1202](https://github.com/elastic/apm-agent-ruby/pull/1202) -* Fixed span context fields for DynamoDB instrumentation [#1178](https://github.com/elastic/apm-agent-ruby/pull/1178) -* Fixed span context fields for 
S3 instrumentation [#1179](https://github.com/elastic/apm-agent-ruby/pull/1179) -* Update user agent info to match spec [#1182](https://github.com/elastic/apm-agent-ruby/pull/1182) - - -## 4.4.0 [release-notes-4.4.0] - - -#### Added [_added_3] - -* Optional span to be ended instead of current span [#1039](https://github.com/elastic/apm-agent-ruby/pull/1039) -* Config option `log_ecs_formatting` [#1053](https://github.com/elastic/apm-agent-ruby/pull/1053) - - -#### Fixed [_fixed_10] - -* Fixed detecting Linux on Alpine for CPU/MEM metrics [#1057](https://github.com/elastic/apm-agent-ruby/pull/1057) - - -## 4.3.0 [release-notes-4.3.0] - - -#### Added [_added_4] - -* Add JVM memory metrics [#1040](https://github.com/elastic/apm-agent-ruby/pull/1040) - - -## 4.2.0 [release-notes-4.2.0] - - -#### Added [_added_5] - -* Add support for AWS Storage Table/CosmosDB [#999](https://github.com/elastic/apm-agent-ruby/pull/999) - - -#### Fixed [_fixed_11] - -* Align HTTP span types/subtypes with spec [#1014](https://github.com/elastic/apm-agent-ruby/pull/1014) -* Passing a full URL as a path to `Net::HTTP` [#1029](https://github.com/elastic/apm-agent-ruby/pull/1029) -* Fix growing number of open file descriptors [#1033](https://github.com/elastic/apm-agent-ruby/pull/1033) - - -## 4.1.0 [release-notes-4.1.0] - - -#### Added [_added_6] - -* Azure App Services instance metadata [#1007](https://github.com/elastic/apm-agent-ruby/pull/1007) - - -#### Changed [_changed_4] - -* `hostname` is now reported split by `configured_hostname` and `detected_hostname` [#1009](https://github.com/elastic/apm-agent-ruby/pull/1009) - - -#### Fixed [_fixed_12] - -* `service_node_name` is now correctly reported as `service.node.configured_name` [#1009](https://github.com/elastic/apm-agent-ruby/pull/1009) -* Fix JSON parsing when using yajl-ruby [#1012](https://github.com/elastic/apm-agent-ruby/pull/1012) -* Fix SpanHelpers when methods take blocks 
[#1013](https://github.com/elastic/apm-agent-ruby/pull/1013) -* Fix missing `environment` param when fetching from Central Config [#1014](https://github.com/elastic/apm-agent-ruby/pull/1014) - - -## 4.0.0 [release-notes-4.0.0] - - -#### Upgrading [_upgrading] - -Be aware that this release changes the agent’s general approach to instrumenting third party libraries. It now uses `Module#prepend` over alias method chaining. - -This doesn’t necessarily impact your application but it could if you are using other gems that use the old approach to patch the same method. Mixing the two approaches can lead to infinite recursion. - - -#### Removed [_removed] - -* Removed support for Ruby 2.3 and JRuby 9.1 [#901](https://github.com/elastic/apm-agent-ruby/pull/901) -* Config option `active`, see `enabled` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* Config option `custom_key_filters`, see `sanitize_field_names` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* Config option `default_tags`, see `global_labels` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* Config option `default_labels`, see `global_labels` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* Config option `ignore_url_patterns`, see `transaction_ignore_urls` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* Config option `use_legacy_sql_parser`, legacy parser no longer included [#900](https://github.com/elastic/apm-agent-ruby/pull/900) - - -#### Changed [_changed_5] - -* Integrations (Spies) use Module#prepend over class_eval [#890](https://github.com/elastic/apm-agent-ruby/pull/890) -* The secrets filter no longer filters based on values, see `sanitize_field_names` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) -* The secrets filter is aligned with other agents, see `sanitize_field_names` [#900](https://github.com/elastic/apm-agent-ruby/pull/900) - - -#### Added [_added_7] - -* Added `set_service` API 
[#1006](https://github.com/elastic/apm-agent-ruby/pull/1006) - - -#### Fixed [_fixed_13] - -* AWS S3 spy accepts symbol bucket names [#998](https://github.com/elastic/apm-agent-ruby/pull/998) -* AWS S3 spy passing on blocks [#998](https://github.com/elastic/apm-agent-ruby/pull/998) -* SQL scanner now recognizes CQL style comments [#1004](https://github.com/elastic/apm-agent-ruby/pull/1004) - - diff --git a/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md b/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md deleted file mode 100644 index f4d771284c..0000000000 --- a/raw-migrated-files/docs-content/serverless/observability-ai-assistant.md +++ /dev/null @@ -1,314 +0,0 @@ -# {{observability}} AI Assistant [observability-ai-assistant] - -The AI Assistant uses generative AI to provide: - -* **Chat**: Have conversations with the AI Assistant. Chat uses function calling to request, analyze, and visualize your data. -* **Contextual insights**: Open prompts throughout {{obs-serverless}} that explain errors and messages and suggest remediation. - -:::{image} /raw-migrated-files/images/serverless-ai-assistant-overview.gif -:alt: Observability AI assistant preview -:screenshot: -::: - -The AI Assistant integrates with your large language model (LLM) provider through our supported Elastic connectors: - -* [OpenAI connector](kibana://reference/connectors-kibana/openai-action-type.md) for OpenAI or Azure OpenAI Service. -* [Amazon Bedrock connector](kibana://reference/connectors-kibana/bedrock-action-type.md) for Amazon Bedrock, specifically for the Claude models. -* [Google Gemini connector](kibana://reference/connectors-kibana/gemini-action-type.md) for Google Gemini. - -::::{important} -The AI Assistant is powered by an integration with your large language model (LLM) provider. LLMs are known to sometimes present incorrect information as if it’s correct. 
Elastic supports configuration and connection to the LLM provider and your knowledge base, but is not responsible for the LLM’s responses. - -:::: - - -::::{important} -Also, the data you provide to the Observability AI assistant is *not* anonymized, and is stored and processed by the third-party AI provider. This includes any data used in conversations for analysis or context, such as alert or event data, detection rule configurations, and queries. Therefore, be careful about sharing any confidential or sensitive details while using this feature. - -:::: - - - -## Requirements [observability-ai-assistant-requirements] - -The AI assistant requires the following: - -* An account with a third-party generative AI provider that preferably supports function calling. If your AI provider does not support function calling, you can configure AI Assistant settings under **Project settings** → **Management** → **AI Assistant for Observability Settings** to simulate function calling, but this might affect performance. - - Refer to the [connector documentation](../../../deploy-manage/manage-connectors.md) for your provider to learn about supported and default models. - -* The knowledge base requires a 4 GB {{ml}} node. - -::::{important} -The free tier offered by third-party generative AI providers may not be sufficient for the proper functioning of the AI assistant. In most cases, a paid subscription to one of the supported providers is required. The Observability AI assistant doesn’t support connecting to a private LLM. Elastic doesn’t recommend using private LLMs with the Observability AI assistant. - -:::: - - - -## Your data and the AI Assistant [observability-ai-assistant-your-data-and-the-ai-assistant] - -Elastic does not use customer data for model training. This includes anything you send the model, such as alert or event data, detection rule configurations, queries, and prompts. 
However, any data you provide to the AI Assistant will be processed by the third-party provider you chose when setting up the OpenAI connector as part of the assistant setup. - -Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Please exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. - - -## Set up the AI Assistant [observability-ai-assistant-set-up-the-ai-assistant] - -To set up the AI Assistant: - -1. Create an authentication key with your AI provider to authenticate requests from the AI Assistant. You’ll use this in the next step. Refer to your provider’s documentation for information about creating authentication keys: - - * [OpenAI API keys](https://platform.openai.com/docs/api-reference) - * [Azure OpenAI Service API keys](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference) - * [Amazon Bedrock authentication keys and secrets](https://docs.aws.amazon.com/bedrock/latest/userguide/security-iam.html) - * [Google Gemini service account keys](https://cloud.google.com/iam/docs/keys-list-get) - -2. From **Project settings** → **Management** → **Connectors**, create a connector for your AI provider: - - * [OpenAI](kibana://reference/connectors-kibana/openai-action-type.md) - * [Amazon Bedrock](kibana://reference/connectors-kibana/bedrock-action-type.md) - * [Google Gemini](kibana://reference/connectors-kibana/gemini-action-type.md) - -3. Authenticate communication between {{obs-serverless}} and the AI provider by providing the following information: - - 1. 
In the **URL** field, enter the AI provider’s API endpoint URL. - 2. Under **Authentication**, enter the key or secret you created in the previous step. - - - -## Add data to the AI Assistant knowledge base [observability-ai-assistant-add-data-to-the-ai-assistant-knowledge-base] - -::::{important} -**If you started using the AI Assistant in technical preview**, any knowledge base articles you created using ELSER v1 will need to be reindexed or upgraded before they can be used. Going forward, you must create knowledge base articles using ELSER v2. You can either: - -* Clear all old knowledge base articles manually and reindex them. -* Upgrade all knowledge base articles indexed with ELSER v1 to ELSER v2 using a [Python script](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/model-upgrades/upgrading-index-to-use-elser.ipynb). - -:::: - - -The AI Assistant uses [ELSER](../../../explore-analyze/machine-learning/nlp/ml-nlp-elser.md), Elastic’s semantic search engine, to recall data from its internal knowledge base index to create retrieval augmented generation (RAG) responses. Adding data such as Runbooks, GitHub issues, internal documentation, and Slack messages to the knowledge base gives the AI Assistant context to provide more specific assistance. - -::::{note} -Your AI provider may collect telemetry when using the AI Assistant. Contact your AI provider for information on how data is collected. - -:::: - - -You can add information to the knowledge base by asking the AI Assistant to remember something while chatting (for example, "remember this for next time"). The assistant will create a summary of the information and add it to the knowledge base. - -You can also add external data to the knowledge base either in the Project Settings UI or using the {{es}} Index API. - - -### Use the UI [observability-ai-assistant-use-the-ui] - -To add external data to the knowledge base in the Project Settings UI: - -1. Go to **Project Settings**. -2. 
In the *Other* section, click **AI assistant for Observability settings**. -3. Then select the **Elastic AI Assistant for Observability**. -4. Switch to the **Knowledge base** tab. -5. Click the **New entry** button, and choose either: - - * **Single entry**: Write content for a single entry in the UI. - * **Bulk import**: Upload a newline delimited JSON (`ndjson`) file containing a list of entries to add to the knowledge base. Each object should conform to the following format: - - ```json - { - "id": "a_unique_human_readable_id", - "text": "Contents of item", - } - ``` - - - -### Use the {{es}} Index API [observability-ai-assistant-use-the-es-index-api] - -1. Ingest external data (GitHub issues, Markdown files, Jira tickets, text files, etc.) into {{es}} using the {{es}} [Index API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create). -2. Reindex your data into the AI Assistant’s knowledge base index by completing the following query in **Developer Tools** → **Console**. Update the following fields before reindexing: - - * `InternalDocsIndex`: Name of the index where your internal documents are stored. - * `text_field`: Name of the field containing your internal documents' text. - * `timestamp`: Name of the timestamp field in your internal documents. - * `public`: If `true`, the document is available to all users with access to your Observability project. If `false`, the document is restricted to the user indicated in the following `user.name` field. - * `user.name` (optional): If defined, restricts the internal document’s availability to a specific user. - * You can add a query filter to index specific documents. 
- - -```console -POST _reindex -{ - "source": { - "index": "", - "_source": [ - "", - "", - "namespace", - "is_correction", - "public", - "confidence" - ] - }, - "dest": { - "index": ".kibana-observability-ai-assistant-kb-000001", - "pipeline": ".kibana-observability-ai-assistant-kb-ingest-pipeline" - }, - "script": { - "inline": "ctx._source.text = ctx._source.remove(\"\");ctx._source.namespace=\"\";ctx._source.is_correction=false;ctx._source.public=;ctx._source.confidence=\"high\";ctx._source['@timestamp'] = ctx._source.remove(\"\");ctx._source['user.name'] = \"\"" - } -} -``` - - -## Interact with the AI Assistant [observability-ai-assistant-interact-with-the-ai-assistant] - -You can chat with the AI Assistant or interact with contextual insights located throughout {{obs-serverless}}. See the following sections for more on interacting with the AI Assistant. - -::::{tip} -After every answer the LLM provides, let us know if the answer was helpful. Your feedback helps us improve the AI Assistant! - -:::: - - - -### Chat with the assistant [observability-ai-assistant-chat-with-the-assistant] - -Click the AI Assistant button (![AI Assistant icon](/raw-migrated-files/images/serverless-ai-assistant-button.png "")) in the upper-right corner where available to start the chat. - -This opens the AI Assistant flyout, where you can ask the assistant questions about your instance: - -:::{image} /raw-migrated-files/images/serverless-ai-assistant-chat.png -:alt: Observability AI assistant chat -:screenshot: -::: - -::::{important} -Asking questions about your data requires function calling, which enables LLMs to reliably interact with third-party generative AI providers to perform searches or run advanced functions using customer data. - -When the Observability AI Assistant performs searches in the cluster, the queries are run with the same level of permissions as the user. 
- -:::: - - - -### Suggest functions [observability-ai-assistant-suggest-functions] - -::::{warning} -This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features. -:::: - - -The AI Assistant uses several functions to include relevant context in the chat conversation through text, data, and visual components. Both you and the AI Assistant can suggest functions. You can also edit the AI Assistant’s function suggestions and inspect function responses. For example, you could use the `kibana` function to call a {{kib}} API on your behalf. - -You can suggest the following functions: - -| Function | Description | -| --- | --- | -| `alerts` | Get alerts for {{obs-serverless}}. | -| `elasticsearch` | Call {{es}} APIs on your behalf. | -| `kibana` | Call {{kib}} APIs on your behalf. | -| `summarize` | Summarize parts of the conversation. | -| `visualize_query` | Visualize charts for ES | - -Additional functions are available when your cluster has APM data: - -| Function | Description | -| --- | --- | -| `get_apm_correlations` | Get field values that are more prominent in the foreground set than the background set. This can be useful in determining which attributes (such as `error.message`, `service.node.name`, or `transaction.name`) are contributing to, for instance, a higher latency. Another option is a time-based comparison, where you compare before and after a change point. | -| `get_apm_downstream_dependencies` | Get the downstream dependencies (services or uninstrumented backends) for a service. Map the downstream dependency name to a service by returning both `span.destination.service.resource` and `service.name`. Use this to drill down further if needed. | -| `get_apm_error_document` | Get a sample error document based on the grouping name. 
This also includes the stacktrace of the error, which might hint to the cause. | -| `get_apm_service_summary` | Get a summary of a single service, including the language, service version, deployments, the environments, and the infrastructure that it is running in. For example, the number of pods and a list of their downstream dependencies. It also returns active alerts and anomalies. | -| `get_apm_services_list` | Get the list of monitored services, their health statuses, and alerts. | -| `get_apm_timeseries` | Display different APM metrics (such as throughput, failure rate, or latency) for any service or all services and any or all of their dependencies. Displayed both as a time series and as a single statistic. Additionally, the function returns any changes, such as spikes, step and trend changes, or dips. You can also use it to compare data by requesting two different time ranges, or, for example, two different service versions. | - - -### Use contextual prompts [observability-ai-assistant-use-contextual-prompts] - -AI Assistant contextual prompts throughout {{obs-serverless}} provide the following information: - -* **Alerts**: Provides possible causes and remediation suggestions for log rate changes. -* **Application performance monitoring (APM)**: Explains APM errors and provides remediation suggestions. -* **Logs**: Explains log messages and generates search patterns to find similar issues. - -For example, in the log details, you’ll see prompts for **What’s this message?** and **How do I find similar log messages?**: - -:::{image} /raw-migrated-files/images/serverless-ai-assistant-logs-prompts.png -:alt: Observability AI assistant example prompts for logs -:screenshot: -::: - -Clicking a prompt generates a message specific to that log entry. You can continue a conversation from a contextual prompt by clicking **Start chat** to open the AI Assistant chat. 
- -:::{image} /raw-migrated-files/images/serverless-ai-assistant-logs.png -:alt: Observability AI assistant example -:screenshot: -::: - - -### Add the AI Assistant connector to alerting workflows [observability-ai-assistant-add-the-ai-assistant-connector-to-alerting-workflows] - -You can use the [Observability AI Assistant connector](kibana://reference/connectors-kibana/obs-ai-assistant-action-type.md) to add AI-generated insights and custom actions to your alerting workflows. To do this: - -1. [Create (or edit) an alerting rule](../../../solutions/observability/incident-management/create-manage-rules.md) and specify the conditions that must be met for the alert to fire. -2. Under **Actions**, select the **Observability AI Assistant** connector type. -3. In the **Connector** list, select the AI connector you created when you set up the assistant. -4. In the **Message** field, specify the message to send to the assistant: - -:::{image} /raw-migrated-files/images/serverless-obs-ai-assistant-action-high-cpu.png -:alt: Add an Observability AI assistant action while creating a rule in the Observability UI -:screenshot: -::: - -You can ask the assistant to generate a report of the alert that fired, recall any information or potential resolutions of past occurrences stored in the knowledge base, provide troubleshooting guidance and resolution steps, and also include other active alerts that may be related. As a last step, you can ask the assistant to trigger an action, such as sending the report (or any other message) to a Slack webhook. - -::::{admonition} NOTE -:class: note - -Currently you can only send messages to Slack, email, Jira, PagerDuty, or a webhook. Additional actions will be added in the future. - -:::: - - -When the alert fires, contextual details about the event—such as when the alert fired, the service or host impacted, and the threshold breached—are sent to the AI Assistant, along with the message provided during configuration. 
The AI Assistant runs the tasks requested in the message and creates a conversation you can use to chat with the assistant: - -:::{image} /raw-migrated-files/images/serverless-obs-ai-assistant-output.png -:alt: AI Assistant conversation created in response to an alert -:screenshot: -::: - -::::{important} -Conversations created by the AI Assistant are public and accessible to every user with permissions to use the assistant. - -:::: - - -It might take a minute or two for the AI Assistant to process the message and create the conversation. - -Note that overly broad prompts may result in the request exceeding token limits. For more information, refer to [Token limits](../../../explore-analyze/ai-assistant.md#token-limits). Also, attempting to analyze several alerts in a single connector execution may cause you to exceed the function call limit. If this happens, modify the message specified in the connector configuration to avoid exceeding limits. - -When asked to send a message to another connector, such as Slack, the AI Assistant attempts to include a link to the generated conversation. - -:::{image} /raw-migrated-files/images/serverless-obs-ai-assistant-slack-message.png -:alt: Message sent by Slack by the AI Assistant includes a link to the conversation -:screenshot: -::: - -The Observability AI Assistant connector is called when the alert fires and when it recovers. - -To learn more about alerting, actions, and connectors, refer to [Alerting](../../../solutions/observability/incident-management/alerting.md). - - -## Elastic documentation for the AI Assistant [obs-ai-product-documentation] - -It is possible to make the Elastic official documentation available to the AI Assistant, which significantly increases its efficiency and accuracy in answering questions related to the Elastic stack and Elastic products. - -Enabling that feature can be done from the **Settings** tab of the AI Assistant Settings page, using the "Install Elastic Documentation" action. 
- - -## Known issues [observability-ai-assistant-known-issues] - - -### Token limits [token-limits] - -Most LLMs have a set number of tokens they can manage in single a conversation. When you reach the token limit, the LLM will throw an error, and Elastic will display a "Token limit reached" error. The exact number of tokens that the LLM can support depends on the LLM provider and model you’re using. If you are using an OpenAI connector, you can monitor token usage in **OpenAI Token Usage** dashboard. For more information, refer to the [OpenAI Connector documentation](kibana://reference/connectors-kibana/openai-action-type.md#openai-connector-token-dashboard). diff --git a/raw-migrated-files/docs-content/serverless/observability-stream-log-files.md b/raw-migrated-files/docs-content/serverless/observability-stream-log-files.md deleted file mode 100644 index b4782bebc0..0000000000 --- a/raw-migrated-files/docs-content/serverless/observability-stream-log-files.md +++ /dev/null @@ -1,477 +0,0 @@ -# Stream any log file [observability-stream-log-files] - -::::{admonition} Required role -:class: note - -The **Admin** role or higher is required to onboard log data. To learn more, refer to [Assign user roles and privileges](../../../deploy-manage/users-roles/cloud-organization/user-roles.md#general-assign-user-roles). - -:::: - - -
-:::{image} /raw-migrated-files/images/serverless-logs-stream-logs-api-key-beats.png -:alt: logs stream logs api key beats -:screenshot: -::: - -:::{image} /raw-migrated-files/images/serverless-log-copy-es-endpoint.png -:alt: Copy a project's Elasticsearch endpoint -:screenshot: -::: - -
-This guide shows you how to send a log file to your Observability project using a standalone {{agent}} and configure the {{agent}} and your data streams using the `elastic-agent.yml` file, and query your logs using the data streams you’ve set up. - -The quickest way to get started is using the **Monitor hosts with {{agent}}** quickstart. Refer to the [quickstart documentation](../../../solutions/observability/get-started/quickstart-monitor-hosts-with-elastic-agent.md) for more information. - -To install and configure the {{agent}} manually, refer to [Manually install and configure the standalone {{agent}}](../../../solutions/observability/logs/stream-any-log-file.md). - - -## Manually install and configure the standalone {{agent}} [manually-install-agent-logs] - -If you’re not using the guided instructions, follow these steps to manually install and configure your the {{agent}}. - - -### Step 1: Download and extract the {{agent}} installation package [observability-stream-log-files-step-1-download-and-extract-the-agent-installation-package] - -On your host, download and extract the installation package that corresponds with your system: - -:::::::{tab-set} - -::::::{tab-item} macOS -```sh -curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.16.1-darwin-x86_64.tar.gz -tar xzvf elastic-agent-8.16.1-darwin-x86_64.tar.gz -``` -:::::: - -::::::{tab-item} Linux -```sh -curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.16.1-linux-x86_64.tar.gz -tar xzvf elastic-agent-8.16.1-linux-x86_64.tar.gz -``` -:::::: - -::::::{tab-item} Windows -```powershell -# PowerShell 5.0+ -wget https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.16.1-windows-x86_64.zip -OutFile elastic-agent-8.16.1-windows-x86_64.zip -Expand-Archive .\elastic-agent-8.16.1-windows-x86_64.zip -``` - -Or manually: - -1. 
Download the {{agent}} Windows zip file from the [download page](https://www.elastic.co/downloads/beats/elastic-agent). -2. Extract the contents of the zip file. -:::::: - -::::::{tab-item} DEB -::::{important} -To simplify upgrading to future versions of {{agent}}, we recommended that you use the tarball distribution instead of the DEB distribution. - -:::: - - -```sh -curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.16.1-amd64.deb -sudo dpkg -i elastic-agent-8.16.1-amd64.deb -``` -:::::: - -::::::{tab-item} RPM -::::{important} -To simplify upgrading to future versions of {{agent}}, we recommended that you use the tarball distribution instead of the RPM distribution. - -:::: - - -```sh -curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.16.1-x86_64.rpm -sudo rpm -vi elastic-agent-8.16.1-x86_64.rpm -``` -:::::: - -::::::: - -### Step 2: Install and start the {{agent}} [observability-stream-log-files-step-2-install-and-start-the-agent] - -After downloading and extracting the installation package, you’re ready to install the {{agent}}. From the agent directory, run the install command that corresponds with your system: - -::::{note} -On macOS, Linux (tar package), and Windows, run the `install` command to install and start {{agent}} as a managed service and start the service. The DEB and RPM packages include a service unit for Linux systems with systemd, For these systems, you must enable and start the service. - -:::: - - -:::::::{tab-set} - -::::::{tab-item} macOS -::::{tip} -You must run this command as the root user because some integrations require root privileges to collect sensitive data. - -:::: - - -```shell -sudo ./elastic-agent install -``` -:::::: - -::::::{tab-item} Linux -::::{tip} -You must run this command as the root user because some integrations require root privileges to collect sensitive data. 
- -:::: - - -```shell -sudo ./elastic-agent install -``` -:::::: - -::::::{tab-item} Windows -Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select **Run As Administrator**). - -From the PowerShell prompt, change to the directory where you installed {{agent}}, and run: - -```shell -.\elastic-agent.exe install -``` -:::::: - -::::::{tab-item} DEB -::::{tip} -You must run this command as the root user because some integrations require root privileges to collect sensitive data. - -:::: - - -```shell -sudo systemctl enable elastic-agent <1> -sudo systemctl start elastic-agent -``` - -1. The DEB package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. If you don’t have systemd, run `sudo service elastic-agent start`. -:::::: - -::::::{tab-item} RPM -::::{tip} -You must run this command as the root user because some integrations require root privileges to collect sensitive data. - -:::: - - -```shell -sudo systemctl enable elastic-agent <1> -sudo systemctl start elastic-agent -``` - -1. The RPM package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. If you don’t have systemd, run `sudo service elastic-agent start`. -:::::: - -::::::: -During installation, you’ll be prompted with some questions: - -1. When asked if you want to install the agent as a service, enter `Y`. -2. When asked if you want to enroll the agent in Fleet, enter `n`. - - -### Step 3: Configure the {{agent}} [observability-stream-log-files-step-3-configure-the-agent] - -After your agent is installed, configure it by updating the `elastic-agent.yml` file. 
- - -#### Locate your configuration file [observability-stream-log-files-locate-your-configuration-file] - -You’ll find the `elastic-agent.yml` in one of the following locations according to your system: - -:::::::{tab-set} - -::::::{tab-item} macOS -Main {{agent}} configuration file location: - -`/Library/Elastic/Agent/elastic-agent.yml` -:::::: - -::::::{tab-item} Linux -Main {{agent}} configuration file location: - -`/opt/Elastic/Agent/elastic-agent.yml` -:::::: - -::::::{tab-item} Windows -Main {{agent}} configuration file location: - -`C:\Program Files\Elastic\Agent\elastic-agent.yml` -:::::: - -::::::{tab-item} DEB -Main {{agent}} configuration file location: - -`/etc/elastic-agent/elastic-agent.yml` -:::::: - -::::::{tab-item} RPM -Main {{agent}} configuration file location: - -`/etc/elastic-agent/elastic-agent.yml` -:::::: - -::::::: - -#### Update your configuration file [observability-stream-log-files-update-your-configuration-file] - -Update the default configuration in the `elastic-agent.yml` file manually. It should look something like this: - -```yaml -outputs: - default: - type: elasticsearch - hosts: ':' - api_key: 'your-api-key' -inputs: - - id: your-log-id - type: filestream - streams: - - id: your-log-stream-id - data_stream: - dataset: example - paths: - - /var/log/your-logs.log -``` - -You need to set the values for the following fields: - -`hosts` -: Copy the {{es}} endpoint from your project’s page and add the port (the default port is `443`). For example, `https://my-deployment.es.us-central1.gcp.cloud.es.io:443`. - - If you’re following the guided instructions in your project, the {{es}} endpoint will be prepopulated in the configuration file. - - :::::{tip} - If you need to find your project’s {{es}} endpoint outside the guided instructions: - - 1. Go to the **Projects** page that lists all your projects. - 2. Click **Manage** next to the project you want to connect to. - 3. Click **View** next to *Endpoints*. - 4. 
Copy the *Elasticsearch endpoint*. - - :::{image} /raw-migrated-files/images/serverless-log-copy-es-endpoint.png - :alt: Copy a project's Elasticsearch endpoint - :screenshot: - ::: - - ::::: - - -`api-key` -: Use an API key to grant the agent access to your project. The API key format should be `:`. - - If you’re following the guided instructions in your project, an API key will be autogenerated and will be prepopulated in the downloadable configuration file. - - If configuring the {{agent}} manually, create an API key: - - 1. Navigate to **Project settings** → **Management*** → ***API keys** and click **Create API key**. - 2. Select **Restrict privileges** and add the following JSON to give privileges for ingesting logs. - - ```json - { - "standalone_agent": { - "cluster": [ - "monitor" - ], - "indices": [ - { - "names": [ - "logs-*-*" - ], - "privileges": [ - "auto_configure", "create_doc" - ] - } - ] - } - } - ``` - - 3. You *must* set the API key to configure {{beats}}. Immediately after the API key is generated and while it is still being displayed, click the **Encoded** button next to the API key and select **Beats** from the list in the tooltip. Base64 encoded API keys are not currently supported in this configuration. - - :::{image} /raw-migrated-files/images/serverless-logs-stream-logs-api-key-beats.png - :alt: logs stream logs api key beats - :screenshot: - ::: - - -`inputs.id` -: A unique identifier for your input. - -`type` -: The type of input. For collecting logs, set this to `filestream`. - -`streams.id` -: A unique identifier for your stream of log data. - -`data_stream.dataset` -: The name for your dataset data stream. Name this data stream anything that signifies the source of the data. In this configuration, the dataset is set to `example`. The default value is `generic`. - -`paths` -: The path to your log files. You can also use a pattern like `/var/log/your-logs.log*`. 
- - -#### Restart the {{agent}} [observability-stream-log-files-restart-the-agent] - -After updating your configuration file, you need to restart the {{agent}}. - -First, stop the {{agent}} and its related executables using the command that works with your system: - -:::::::{tab-set} - -::::::{tab-item} macOS -```shell -sudo launchctl unload /Library/LaunchDaemons/co.elastic.elastic-agent.plist -``` - -::::{note} -{{agent}} will restart automatically if the system is rebooted. - -:::: -:::::: - -::::::{tab-item} Linux -```shell -sudo service elastic-agent stop -``` - -::::{note} -{{agent}} will restart automatically if the system is rebooted. - -:::: -:::::: - -::::::{tab-item} Windows -```shell -Stop-Service Elastic Agent -``` - -If necessary, use Task Manager on Windows to stop {{agent}}. This will kill the `elastic-agent` process and any sub-processes it created (such as {{beats}}). - -::::{note} -{{agent}} will restart automatically if the system is rebooted. - -:::: -:::::: - -::::::{tab-item} DEB -The DEB package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. - -Use `systemctl` to stop the agent: - -```shell -sudo systemctl stop elastic-agent -``` - -Otherwise, use: - -```shell -sudo service elastic-agent stop -``` - -::::{note} -{{agent}} will restart automatically if the system is rebooted. - -:::: -:::::: - -::::::{tab-item} RPM -The RPM package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. - -Use `systemctl` to stop the agent: - -```shell -sudo systemctl stop elastic-agent -``` - -Otherwise, use: - -```shell -sudo service elastic-agent stop -``` - -::::{note} -{{agent}} will restart automatically if the system is rebooted. 
- -:::: -:::::: - -::::::: -Next, restart the {{agent}} using the command that works with your system: - -:::::::{tab-set} - -::::::{tab-item} macOS -```shell -sudo launchctl load /Library/LaunchDaemons/co.elastic.elastic-agent.plist -``` -:::::: - -::::::{tab-item} Linux -```shell -sudo service elastic-agent start -``` -:::::: - -::::::{tab-item} Windows -```shell -Start-Service Elastic Agent -``` -:::::: - -::::::{tab-item} DEB -The DEB package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. - -Use `systemctl` to start the agent: - -```shell -sudo systemctl start elastic-agent -``` - -Otherwise, use: - -```shell -sudo service elastic-agent start -``` -:::::: - -::::::{tab-item} RPM -The RPM package includes a service unit for Linux systems with systemd. On these systems, you can manage {{agent}} by using the usual systemd commands. - -Use `systemctl` to start the agent: - -```shell -sudo systemctl start elastic-agent -``` - -Otherwise, use: - -```shell -sudo service elastic-agent start -``` -:::::: - -::::::: - -## Troubleshoot your {{agent}} configuration [observability-stream-log-files-troubleshoot-your-agent-configuration] - -If you’re not seeing your log files in your project, verify the following in the `elastic-agent.yml` file: - -* The path to your logs file under `paths` is correct. -* Your API key is in `:` format. If not, your API key may be in an unsupported format, and you’ll need to create an API key in **Beats** format. - -If you’re still running into issues, refer to [{{agent}} troubleshooting](../../../troubleshoot/ingest/fleet/common-problems.md) and [Configure standalone Elastic Agents](/reference/fleet/configure-standalone-elastic-agents.md). 
- - -## Next steps [observability-stream-log-files-next-steps] - -After you have your agent configured and are streaming log data to your project: - -* Refer to the [Parse and organize logs](../../../solutions/observability/logs/parse-route-logs.md) documentation for information on extracting structured fields from your log data, rerouting your logs to different data streams, and filtering and aggregating your log data. -* Refer to the [Filter and aggregate logs](../../../solutions/observability/logs/filter-aggregate-logs.md) documentation for information on filtering and aggregating your log data to find specific information, gain insight, and monitor your systems more efficiently. diff --git a/raw-migrated-files/docs-content/serverless/what-is-observability-serverless.md b/raw-migrated-files/docs-content/serverless/what-is-observability-serverless.md deleted file mode 100644 index d8ab2f6f9a..0000000000 --- a/raw-migrated-files/docs-content/serverless/what-is-observability-serverless.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -navigation_title: "Elastic Observability" ---- - -# {{obs-serverless}} [what-is-observability-serverless] - - -{{obs-serverless}} accelerates problem resolution with open, flexible, and unified observability powered by advanced machine learning and analytics. Elastic ingests all operational and business telemetry and correlates for faster root cause detection. - -Not using serverless? Go to the [Elastic Observability docs](../../../solutions/observability.md). - - -## Get started [_get_started] - -* [**Get started**](../../../solutions/observability/get-started.md): Discover more about our observability features and how to get started. -* [**Quickstart: Monitor hosts with Elastic Agent**](../../../solutions/observability/get-started/quickstart-monitor-hosts-with-elastic-agent.md): Scan your host to detect and collect logs and metrics. 
-* [**Quickstart: Monitor your Kubernetes cluster with Elastic Agent**](../../../solutions/observability/get-started/quickstart-monitor-kubernetes-cluster-with-elastic-agent.md): Create the Kubernetes resources that are required to monitor your cluster infrastructure. -* [**Get started with Logs**](../../../solutions/observability/logs/get-started-with-system-logs.md): Add your log data to {{obs-serverless}} and start exploring your logs. -* [**Get started with traces and APM**](../../../solutions/observability/apps/get-started-with-apm.md): Collect Application Performance Monitoring (APM) data and visualize it in real time. -* [**Get started with metrics**](../../../solutions/observability/infra-and-hosts/get-started-with-system-metrics.md): Add your metrics data to {{obs-serverless}} and visualize it in real time. - - -## How to [_how_to] - -* [**Explore log data**](../../../solutions/observability/logs/logs-explorer.md): Use Discover to explore your log data. -* [**Trigger alerts and triage problems**](../../../solutions/observability/incident-management/create-manage-rules.md): Create rules to detect complex conditions and trigger alerts. -* [**Track and deliver on your SLOs**](../../../solutions/observability/incident-management/service-level-objectives-slos.md): Measure key metrics important to the business. -* [**Detect anomalies and spikes**](../../../explore-analyze/machine-learning/anomaly-detection.md): Find unusual behavior in time series data. -* [**Monitor application performance**](../../../solutions/observability/apps/application-performance-monitoring-apm.md): Monitor your software services and applications in real time. -* [**Integrate with OpenTelemetry**](../../../solutions/observability/apps/use-opentelemetry-with-apm.md): Reuse existing APM instrumentation to capture logs, traces, and metrics. 
-* [**Monitor your hosts and services**](../../../solutions/observability/infra-and-hosts/analyze-compare-hosts.md): Get a metrics-driven view of your hosts backed by an interface called Lens. - - - - - - - - - - - - diff --git a/raw-migrated-files/observability-docs/observability/index.md b/raw-migrated-files/observability-docs/observability/index.md deleted file mode 100644 index c3706a64d1..0000000000 --- a/raw-migrated-files/observability-docs/observability/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Observability - -Migrated files from the Observability book. diff --git a/raw-migrated-files/observability-docs/observability/obs-ai-assistant.md b/raw-migrated-files/observability-docs/observability/obs-ai-assistant.md deleted file mode 100644 index 524eab3f73..0000000000 --- a/raw-migrated-files/observability-docs/observability/obs-ai-assistant.md +++ /dev/null @@ -1,411 +0,0 @@ -# Observability AI Assistant [obs-ai-assistant] - -::::{important} -To run the Observability AI Assistant on self-hosted Elastic stack, you need an [appropriate license](https://www.elastic.co/subscriptions). -:::: - - -The AI Assistant uses generative AI to provide: - -* **Contextual insights** — open prompts throughout {{observability}} that explain errors and messages and suggest remediation. -* **Chat** — have conversations with the AI Assistant. Chat uses function calling to request, analyze, and visualize your data. - -:::{image} /raw-migrated-files/images/observability-obs-assistant2.gif -:alt: Observability AI assistant preview -:screenshot: -::: - -The AI Assistant integrates with your large language model (LLM) provider through our supported {{stack}} connectors: - -* [OpenAI connector](kibana://reference/connectors-kibana/openai-action-type.md) for OpenAI or Azure OpenAI Service. -* [Amazon Bedrock connector](kibana://reference/connectors-kibana/bedrock-action-type.md) for Amazon Bedrock, specifically for the Claude models. 
-* [Google Gemini connector](kibana://reference/connectors-kibana/gemini-action-type.md) for Google Gemini. - -::::{important} -The AI Assistant is powered by an integration with your large language model (LLM) provider. LLMs are known to sometimes present incorrect information as if it’s correct. Elastic supports configuration and connection to the LLM provider and your knowledge base, but is not responsible for the LLM’s responses. - -:::: - - -::::{important} -Also, the data you provide to the Observability AI assistant is *not* anonymized, and is stored and processed by the third-party AI provider. This includes any data used in conversations for analysis or context, such as alert or event data, detection rule configurations, and queries. Therefore, be careful about sharing any confidential or sensitive details while using this feature. - -:::: - - - -## Requirements [obs-ai-requirements] - -The AI assistant requires the following: - -* {{stack}} version 8.9 and later. -* A [self-managed](elasticsearch://reference/ingestion-tools/search-connectors/self-managed-connectors.md) connector service must be deployed if search connectors are used to populate external data into the knowledge base. -* An account with a third-party generative AI provider that preferably supports function calling. If your AI provider does not support function calling, you can configure AI Assistant settings under **Stack Management** to simulate function calling, but this might affect performance. - - Refer to the [connector documentation](../../../deploy-manage/manage-connectors.md) for your provider to learn about supported and default models. - -* The knowledge base requires a 4 GB {{ml}} node. - -::::{important} -The free tier offered by third-party generative AI provider may not be sufficient for the proper functioning of the AI assistant. In most cases, a paid subscription to one of the supported providers is required. 
- -The Observability AI assistant doesn’t support connecting to a private LLM. Elastic doesn’t recommend using private LLMs with the Observability AI assistant. - -:::: - - -::::{important} -In {{ecloud}} or {{ece}}, if you have Machine Learning autoscaling enabled, Machine Learning nodes will be started when using the knowledge base and AI Assistant. Therefore using these features will incur additional costs. - -:::: - - - -## Your data and the AI Assistant [data-information] - -Elastic does not use customer data for model training. This includes anything you send the model, such as alert or event data, detection rule configurations, queries, and prompts. However, any data you provide to the AI Assistant will be processed by the third-party provider you chose when setting up the OpenAI connector as part of the assistant setup. - -Elastic does not control third-party tools, and assumes no responsibility or liability for their content, operation, or use, nor for any loss or damage that may arise from your using such tools. Please exercise caution when using AI tools with personal, sensitive, or confidential information. Any data you submit may be used by the provider for AI training or other purposes. There is no guarantee that the provider will keep any information you provide secure or confidential. You should familiarize yourself with the privacy practices and terms of use of any generative AI tools prior to use. - - -## Set up the AI Assistant [obs-ai-set-up] - -To set up the AI Assistant: - -1. Create an authentication key with your AI provider to authenticate requests from the AI Assistant. You’ll use this in the next step. 
Refer to your provider’s documentation for information about creating authentication keys: - - * [OpenAI API keys](https://platform.openai.com/docs/api-reference) - * [Azure OpenAI Service API keys](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference) - * [Amazon Bedrock authentication keys and secrets](https://docs.aws.amazon.com/bedrock/latest/userguide/security-iam.html) - * [Google Gemini service account keys](https://cloud.google.com/iam/docs/keys-list-get) - -2. Create a connector for your AI provider. Refer to the connector documentation to learn how: - - * [OpenAI](kibana://reference/connectors-kibana/openai-action-type.md) - * [Amazon Bedrock](kibana://reference/connectors-kibana/bedrock-action-type.md) - * [Google Gemini](kibana://reference/connectors-kibana/gemini-action-type.md) - -3. Authenticate communication between {{observability}} and the AI provider by providing the following information: - - 1. In the **URL** field, enter the AI provider’s API endpoint URL. - 2. Under **Authentication**, enter the key or secret you created in the previous step. - - - -## Add data to the AI Assistant knowledge base [obs-ai-add-data] - -::::{important} -**If you started using the AI Assistant in technical preview**, any knowledge base articles you created before 8.12 will have to be reindexed or upgraded before they can be used. Knowledge base articles created before 8.12 use ELSER v1. In 8.12, knowledge base articles must use ELSER v2. Options include: - -* Clear all old knowledge base articles manually and reindex them. -* Upgrade all knowledge base articles indexed with ELSER v1 to ELSER v2 using a [Python script](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/model-upgrades/upgrading-index-to-use-elser.ipynb). 
- -:::: - - -The AI Assistant uses [ELSER](../../../explore-analyze/machine-learning/nlp/ml-nlp-elser.md), Elastic’s semantic search engine, to recall data from its internal knowledge base index to create retrieval augmented generation (RAG) responses. Adding data such as Runbooks, GitHub issues, internal documentation, and Slack messages to the knowledge base gives the AI Assistant context to provide more specific assistance. - -::::{note} -Your AI provider may collect telemetry when using the AI Assistant. Contact your AI provider for information on how data is collected. -:::: - - -Add data to the knowledge base with one or more of the following methods: - -* [Use the knowledge base UI](../../../solutions/observability/observability-ai-assistant.md#obs-ai-kb-ui) available at [AI Assistant Settings](../../../solutions/observability/observability-ai-assistant.md#obs-ai-settings) page. -* [Use search connectors](../../../solutions/observability/observability-ai-assistant.md#obs-ai-search-connectors) - -You can also add information to the knowledge base by asking the AI Assistant to remember something while chatting (for example, "remember this for next time"). The assistant will create a summary of the information and add it to the knowledge base. - - -### Use the knowledge base UI [obs-ai-kb-ui] - -To add external data to the knowledge base in {{kib}}: - -1. To open AI Assistant settings, find `AI Assistants` in the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). -2. Under **Elastic AI Assistant for Observability**, click **Manage settings**. -3. Switch to the **Knowledge base** tab. -4. Click the **New entry** button, and choose either: - - * **Single entry**: Write content for a single entry in the UI. - * **Bulk import**: Upload a newline delimited JSON (`ndjson`) file containing a list of entries to add to the knowledge base. 
Each object should conform to the following format: - - ```json - { - "id": "a_unique_human_readable_id", - "text": "Contents of item" - } - ``` - - - -### Use search connectors [obs-ai-search-connectors] - -::::{tip} -The [search connectors](elasticsearch://reference/ingestion-tools/search-connectors/index.md) described in this section differ from the [Stack management → Connectors](../../../deploy-manage/manage-connectors.md) configured during the [AI Assistant setup](../../../solutions/observability/observability-ai-assistant.md#obs-ai-set-up). Search connectors are only needed when importing external data into the Knowledge base of the AI Assistant, while the stack connector to the LLM is required for the AI Assistant to work. - -:::: - - -[Connectors](elasticsearch://reference/ingestion-tools/search-connectors/index.md) allow you to index content from external sources thereby making it available for the AI Assistant. This can greatly improve the relevance of the AI Assistant’s responses. Data can be integrated from sources such as GitHub, Confluence, Google Drive, Jira, AWS S3, Microsoft Teams, Slack, and more. - -UI affordances for creating and managing search connectors are available in the Search Solution in {{kib}}. You can also use the {{es}} [Connector APIs](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-connector) to create and manage search connectors. - -A [self-managed](elasticsearch://reference/ingestion-tools/search-connectors/self-managed-connectors.md) connector service must be deployed to run connectors. - -By default, the AI Assistant queries all search connector indices. To override this behavior and customize which indices are queried, adjust the **Search connector index pattern** setting on the [AI Assistant Settings](../../../solutions/observability/observability-ai-assistant.md#obs-ai-settings) page. This allows precise control over which data sources are included in AI Assistant knowledge base. 
- -To create a connector in the {{kib}} UI and make its content available to the AI Assistant knowledge base, follow these steps: - -1. Open **Connectors** by finding `Content / Connectors` in the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). - - ::::{note} - If your {{kib}} Space doesn’t include the Search solution you will have to create the connector from a different space or change your space **Solution view** setting to `Classic`. - - :::: - -2. Follow the instructions to create a new connector. - - For example, if you create a [GitHub connector](elasticsearch://reference/ingestion-tools/search-connectors/es-connectors-github.md) you have to set a `name`, attach it to a new or existing `index`, add your `personal access token` and include the `list of repositories` to synchronize. - - Learn more about configuring and [using connectors](elasticsearch://reference/ingestion-tools/search-connectors/connectors-ui-in-kibana.md) in the Elasticsearch documentation. - - -After creating your connector, create the embeddings needed by the AI Assistant. You can do this using either: - -* [a machine learning (ML) pipeline](../../../solutions/observability/observability-ai-assistant.md#obs-ai-search-connectors-ml-embeddings): requires the ELSER ML model. -* [a `semantic_text` field type](../../../solutions/observability/observability-ai-assistant.md#obs-ai-search-connectors-semantic-text): can use any available ML model (ELSER, E5, or a custom model). - - -#### Use machine learning pipelines to create AI Assistant embeddings [obs-ai-search-connectors-ml-embeddings] - -To create the embeddings needed by the AI Assistant (weights and tokens into a sparse vector field) using an **ML Inference Pipeline**: - -1. Open the previously created connector, and select the **Pipelines** tab. -2. Select **Copy and customize** under `Unlock your custom pipelines`. -3. Select **Add Inference Pipeline** under `Machine Learning Inference Pipelines`. -4. 
Select the **ELSER (Elastic Learned Sparse EncodeR)** ML model to add the necessary embeddings to the data. -5. Select the fields that need to be evaluated as part of the inference pipeline. -6. Test and save the inference pipeline and the overall pipeline. - -After creating the pipeline, complete the following steps: - -1. Sync the data. - - Once the pipeline is set up, perform a **Full Content Sync** of the connector. The inference pipeline will process the data as follows: - - * As data comes in, ELSER is applied to the data, and embeddings (weights and tokens into a [sparse vector field](elasticsearch://reference/query-languages/query-dsl/query-dsl-sparse-vector-query.md)) are added to capture semantic meaning and context of the data. - * When you look at the ingested documents, you can see the embeddings are added to the `predicted_value` field in the documents. - -2. Check if AI Assistant can use the index (optional). - - Ask something to the AI Assistant related with the indexed data. - - - -#### Use a `semantic_text` field type to create AI Assistant embeddings [obs-ai-search-connectors-semantic-text] - -To create the embeddings needed by the AI Assistant using a [`semantic_text`](elasticsearch://reference/elasticsearch/mapping-reference/semantic-text.md) field type: - -1. Open the previously created connector, and select the **Mappings** tab. -2. Select **Add field**. -3. Under **Field type**, select **Semantic text**. -4. Under **Reference field**, select the field you want to use for model inference. -5. Under **Select an inference endpoint**, select the model you want to use to add the embeddings to the data. -6. Add the field to your mapping by selecting **Add field**. -7. Sync the data by selecting **Full Content** from the **Sync** menu. - -The AI Assistant will now query the connector you’ve set up using the model you’ve selected. Check that the AI Assistant is using the index by asking it something related to the indexed data. 
- - -## Interact with the AI Assistant [obs-ai-interact] - -Chat with the AI Assistant or interact with contextual insights located throughout {{observability}}. Check the following sections for more on interacting with the AI Assistant. - -::::{tip} -After every answer the LLM provides, let us know if the answer was helpful. Your feedback helps us improve the AI Assistant! -:::: - - - -### Chat with the assistant [obs-ai-chat] - -Select the **AI Assistant** icon (![AI Assistant icon](/raw-migrated-files/images/observability-ai-assistant-icon.png "")) at the upper-right corner of any {{observability}} application to start the chat. - -This opens the AI Assistant flyout, where you can ask the assistant questions about your instance: - -:::{image} /raw-migrated-files/images/observability-obs-ai-chat.png -:alt: Observability AI assistant chat -:screenshot: -::: - -::::{important} -Asking questions about your data requires `function calling`, which enables LLMs to reliably interact with third-party generative AI providers to perform searches or run advanced functions using customer data. - -When the {{observability}} AI Assistant performs searches in the cluster, the queries are run with the same level of permissions as the user. - -:::: - - - -### Suggest functions [obs-ai-functions] - -::::{warning} -This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features. -:::: - - -The AI Assistant uses functions to include relevant context in the chat conversation through text, data, and visual components. Both you and the AI Assistant can suggest functions. You can also edit the AI Assistant’s function suggestions and inspect function responses. - -Main functions: - -`alerts` -: Get alerts for {{observability}}. - -`elasticsearch` -: Call {{es}} APIs on your behalf. 
- -`kibana` -: Call {{kib}} APIs on your behalf. - -`summarize` -: Summarize parts of the conversation. - -`visualize_query` -: Visualize charts for ES|QL queries. - -Additional functions are available when your cluster has APM data: - -`get_apm_correlations` -: Get field values that are more prominent in the foreground set than the background set. This can be useful in determining which attributes (such as `error.message`, `service.node.name`, or `transaction.name`) are contributing to, for instance, a higher latency. Another option is a time-based comparison, where you compare before and after a change point. - -`get_apm_downstream_dependencies` -: Get the downstream dependencies (services or uninstrumented backends) for a service. Map the downstream dependency name to a service by returning both `span.destination.service.resource` and `service.name`. Use this to drill down further if needed. - -`get_apm_error_document` -: Get a sample error document based on the grouping name. This also includes the stacktrace of the error, which might hint to the cause. - -`get_apm_service_summary` -: Get a summary of a single service, including the language, service version, deployments, the environments, and the infrastructure that it is running in. For example, the number of pods and a list of their downstream dependencies. It also returns active alerts and anomalies. - -`get_apm_services_list` -: Get the list of monitored services, their health statuses, and alerts. - -`get_apm_timeseries` -: Display different APM metrics (such as throughput, failure rate, or latency) for any service or all services and any or all of their dependencies. Displayed both as a time series and as a single statistic. Additionally, the function returns any changes, such as spikes, step and trend changes, or dips. You can also use it to compare data by requesting two different time ranges, or, for example, two different service versions. 
- - -### Use contextual prompts [obs-ai-prompts] - -AI Assistant contextual prompts throughout {{observability}} provide the following information: - -* **Universal Profiling** — explains the most expensive libraries and functions in your fleet and provides optimization suggestions. -* **Application performance monitoring (APM)** — explains APM errors and provides remediation suggestions. -* **Infrastructure Observability** — explains the processes running on a host. -* **Logs** — explains log messages and generates search patterns to find similar issues. -* **Alerting** — provides possible causes and remediation suggestions for log rate changes. - -For example, in the log details, you’ll see prompts for **What’s this message?** and **How do I find similar log messages?**: - -:::{image} /raw-migrated-files/images/observability-obs-ai-logs-prompts.png -:alt: Observability AI assistant logs prompts -:screenshot: -::: - -Clicking a prompt generates a message specific to that log entry: - -:::{image} /raw-migrated-files/images/observability-obs-ai-logs.gif -:alt: Observability AI assistant example -:screenshot: -::: - -Continue a conversation from a contextual prompt by clicking **Start chat** to open the AI Assistant chat. - - -### Add the AI Assistant connector to alerting workflows [obs-ai-connector] - -Use the [Observability AI Assistant connector](kibana://reference/connectors-kibana/obs-ai-assistant-action-type.md) to add AI-generated insights and custom actions to your alerting workflows as follows: - -1. [Create (or edit) an alerting rule](../../../solutions/observability/incident-management/create-manage-rules.md) and specify the conditions that must be met for the alert to fire. -2. Under **Actions**, select the **Observability AI Assistant** connector type. -3. In the **Connector** list, select the AI connector you created when you set up the assistant. -4. 
In the **Message** field, specify the message to send to the assistant: - - :::{image} /raw-migrated-files/images/observability-obs-ai-assistant-action-high-cpu.png - :alt: Add an Observability AI assistant action while creating a rule in the Observability UI - :screenshot: - ::: - - -You can ask the assistant to generate a report of the alert that fired, recall any information or potential resolutions of past occurrences stored in the knowledge base, provide troubleshooting guidance and resolution steps, and also include other active alerts that may be related. As a last step, you can ask the assistant to trigger an action, such as sending the report (or any other message) to a Slack webhook. - -::::{note} -Currently only Slack, email, Jira, PagerDuty, or webhook actions are supported. Additional actions will be added in the future. -:::: - - -When the alert fires, contextual details about the event—such as when the alert fired, the service or host impacted, and the threshold breached—are sent to the AI Assistant, along with the message provided during configuration. The AI Assistant runs the tasks requested in the message and creates a conversation you can use to chat with the assistant: - -:::{image} /raw-migrated-files/images/observability-obs-ai-assistant-output.png -:alt: AI Assistant conversation created in response to an alert -:screenshot: -::: - -::::{important} -Conversations created by the AI Assistant are public and accessible to every user with permissions to use the assistant. -:::: - - -It might take a minute or two for the AI Assistant to process the message and create the conversation. - -Note that overly broad prompts may result in the request exceeding token limits. For more information, refer to [Token limits](../../../solutions/observability/observability-ai-assistant.md#obs-ai-token-limits). Also, attempting to analyze several alerts in a single connector execution may cause you to exceed the function call limit. 
If this happens, modify the message specified in the connector configuration to avoid exceeding limits. - -When asked to send a message to another connector, such as Slack, the AI Assistant attempts to include a link to the generated conversation. - -::::{tip} -The `server.publicBaseUrl` setting must be correctly specified under {{kib}} settings, or the AI Assistant is unable to generate this link. -:::: - - -:::{image} /raw-migrated-files/images/observability-obs-ai-assistant-slack-message.png -:alt: Message sent by Slack by the AI Assistant includes a link to the conversation -:screenshot: -::: - -The Observability AI Assistant connector is called when the alert fires and when it recovers. - -To learn more about alerting, actions, and connectors, refer to [Alerting](../../../solutions/observability/incident-management/alerting.md). - - -## AI Assistant Settings [obs-ai-settings] - -To access the AI Assistant Settings page, you can: - -* Find `AI Assistants` in the [global search field](/explore-analyze/find-and-organize/find-apps-and-objects.md). -* Use the **More actions** menu inside the AI Assistant window. - -The AI Assistant Settings page contains the following tabs: - -* **Settings**: Configures the main AI Assistant settings, which are explained directly within the interface. -* **Knowledge base**: Manages [knowledge base entries](../../../solutions/observability/observability-ai-assistant.md#obs-ai-kb-ui). -* **Search Connectors**: Provides a link to {{kib}} **Search** → **Content** → **Connectors** UI for connectors configuration. - - -## Elastic documentation for the AI Assistant [obs-ai-product-documentation] - -It is possible to make the Elastic official documentation available to the AI Assistant, which significantly increases its efficiency and accuracy in answering questions related to the Elastic stack and Elastic products. 
- -Enabling that feature can be done from the **Settings** tab of the AI Assistant Settings page, using the "Install Elastic Documentation" action. - -::::{important} -Installing the product documentation in air gapped environments requires specific installation and configuration instructions, which are available in the [{{kib}} Kibana AI Assistants settings documentation](kibana://reference/configuration-reference/ai-assistant-settings.md). -:::: - - - -## Known issues [obs-ai-known-issues] - - -### Token limits [obs-ai-token-limits] - -Most LLMs have a set number of tokens they can manage in single a conversation. When you reach the token limit, the LLM will throw an error, and Elastic will display a "Token limit reached" error in Kibana. The exact number of tokens that the LLM can support depends on the LLM provider and model you’re using. If you use an OpenAI connector, monitor token utilization in **OpenAI Token Usage** dashboard. For more information, refer to the [OpenAI Connector documentation](kibana://reference/connectors-kibana/openai-action-type.md#openai-connector-token-dashboard). 
diff --git a/raw-migrated-files/toc.yml b/raw-migrated-files/toc.yml deleted file mode 100644 index d419d3d6ad..0000000000 --- a/raw-migrated-files/toc.yml +++ /dev/null @@ -1,31 +0,0 @@ -project: 'Files to pull from' -toc: - - file: index.md - - file: apm-agent-android/apm-agent-android/index.md - children: - - file: apm-agent-android/apm-agent-android/release-notes.md - - file: apm-agent-ios/apm-agent-swift/index.md - children: - - file: apm-agent-ios/apm-agent-swift/release-notes-v1.0.0.md - - file: apm-agent-ios/apm-agent-swift/release-notes-v1.0.1.md - - file: apm-agent-ruby/apm-agent-ruby/index.md - children: - - file: apm-agent-ruby/apm-agent-ruby/release-notes-4.x.md - - file: cloud/cloud/index.md - children: - - file: cloud/cloud/ec-monitoring-setup.md - - file: docs-content/serverless/index.md - children: - - file: docs-content/serverless/observability-ai-assistant.md - - file: docs-content/serverless/observability-apm-get-started.md - - file: docs-content/serverless/observability-ecs-application-logs.md - - file: docs-content/serverless/observability-plaintext-application-logs.md - - file: docs-content/serverless/observability-stream-log-files.md - - file: docs-content/serverless/what-is-observability-serverless.md - - file: observability-docs/observability/index.md - children: - - file: observability-docs/observability/index.md - - file: observability-docs/observability/obs-ai-assistant.md - - file: stack-docs/elastic-stack/index.md - children: - - file: stack-docs/elastic-stack/installing-stack-demo-self.md diff --git a/troubleshoot/observability/troubleshoot-logs.md b/troubleshoot/observability/troubleshoot-logs.md index b1b3bcaba1..0ff9770e82 100644 --- a/troubleshoot/observability/troubleshoot-logs.md +++ b/troubleshoot/observability/troubleshoot-logs.md @@ -39,7 +39,7 @@ You need permission to manage API keys You need to either: * Ask an administrator to update your user role to at least **Developer** by going to the user icon on the header bar and 
opening **Organization** → **Members**. Read more about user roles in [](/deploy-manage/users-roles/cloud-organization/user-roles.md). After your use role is updated, restart the onboarding flow.
-* Get an API key from an administrator and manually add the API to the {{agent}} configuration. See [Configure the {{agent}}](../../raw-migrated-files/docs-content/serverless/observability-stream-log-files.md#observability-stream-log-files-step-3-configure-the-agent) for more on manually updating the configuration and adding the API key.
+* Get an API key from an administrator and manually add the API key to the {{agent}} configuration. See [Configure the {{agent}}](/solutions/observability/logs/stream-any-log-file.md#logs-stream-agent-config) for more on manually updating the configuration and adding the API key.

::::

::::{tab-item} {{stack}}