diff --git a/.github/ISSUE_TEMPLATE/docs-request-internal.yaml b/.github/ISSUE_TEMPLATE/docs-request-internal.yaml index 1010383b67..19839308be 100644 --- a/.github/ISSUE_TEMPLATE/docs-request-internal.yaml +++ b/.github/ISSUE_TEMPLATE/docs-request-internal.yaml @@ -44,19 +44,14 @@ body: default: 0 validations: required: true - - type: dropdown + - type: textarea id: version-ess attributes: label: ESS release - description: Select a release version if your request is tied to the Elastic Stack release schedule. - options: - - '8.12' - - '8.13' - - '8.14' - - '8.15' - - '8.16' - - 'N/A' - default: 0 + description: Please provide a release version if your request is tied to the Elastic Stack release schedule. + placeholder: | + For example: + "The functionality is being introduced in ESS version 8.18.0" validations: required: true - type: input diff --git a/.mergify.yml b/.mergify.yml index b547826c0d..a8878f4820 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -29,7 +29,6 @@ pull_request_rules: - name: backport patches to 8.x branch conditions: - merged - - base=main - label=v8.18.0 actions: backport: @@ -43,7 +42,6 @@ pull_request_rules: - name: backport patches to 8.17 branch conditions: - merged - - base=main - label=v8.17.0 actions: backport: @@ -57,7 +55,6 @@ pull_request_rules: - name: backport patches to 8.16 branch conditions: - merged - - base=main - label=v8.16.0 actions: backport: @@ -71,7 +68,6 @@ pull_request_rules: - name: backport patches to 8.15 branch conditions: - merged - - base=main - label=v8.15.0 actions: backport: @@ -85,7 +81,6 @@ pull_request_rules: - name: backport patches to 8.14 branch conditions: - merged - - base=main - label=v8.14.0 actions: backport: @@ -99,7 +94,6 @@ pull_request_rules: - name: backport patches to 8.13 branch conditions: - merged - - base=main - label=v8.13.0 actions: backport: @@ -113,7 +107,6 @@ pull_request_rules: - name: backport patches to 8.12 branch conditions: - merged - - base=main - label=v8.12.0 actions: 
backport: @@ -127,7 +120,6 @@ pull_request_rules: - name: backport patches to 8.11 branch conditions: - merged - - base=main - label=v8.11.0 actions: backport: @@ -141,7 +133,6 @@ pull_request_rules: - name: backport patches to 8.10 branch conditions: - merged - - base=main - label=v8.10.0 actions: backport: @@ -155,7 +146,6 @@ pull_request_rules: - name: backport patches to 8.9 branch conditions: - merged - - base=main - label=v8.9.0 actions: backport: @@ -169,7 +159,6 @@ pull_request_rules: - name: backport patches to 8.8 branch conditions: - merged - - base=main - label=v8.8.0 actions: backport: @@ -183,7 +172,6 @@ pull_request_rules: - name: backport patches to 8.7 branch conditions: - merged - - base=main - label=v8.7.0 actions: backport: @@ -197,7 +185,6 @@ pull_request_rules: - name: backport patches to 8.6 branch conditions: - merged - - base=main - label=v8.6.0 actions: backport: @@ -211,7 +198,6 @@ pull_request_rules: - name: backport patches to 8.5 branch conditions: - merged - - base=main - label=v8.5.0 actions: backport: @@ -225,7 +211,6 @@ pull_request_rules: - name: backport patches to 8.4 branch conditions: - merged - - base=main - label=v8.4.0 actions: backport: @@ -239,7 +224,6 @@ pull_request_rules: - name: backport patches to 8.3 branch conditions: - merged - - base=main - label=v8.3.0 actions: backport: @@ -253,7 +237,6 @@ pull_request_rules: - name: backport patches to 8.2 branch conditions: - merged - - base=main - label=v8.2.0 actions: backport: @@ -267,7 +250,6 @@ pull_request_rules: - name: backport patches to 8.1 branch conditions: - merged - - base=main - label=v8.1.0 actions: backport: @@ -278,31 +260,29 @@ pull_request_rules: title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" labels: - backport - - name: backport patches to 7.17 branch + - name: backport patches to 8.0 branch conditions: - merged - - base=main - - label=v7.17.0 + - label=v8.0.0 actions: backport: assignees: - "{{ author }}" branches: - - "7.17" + 
- "8.0" title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" labels: - backport - - name: backport patches to 8.0 branch + - name: backport patches to 7.17 branch conditions: - merged - - base=main - - label=v8.0.0 + - label=v7.17.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.0" + - "7.17" title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})" labels: - backport diff --git a/docs/AI-for-security/connect-to-byo.asciidoc b/docs/AI-for-security/connect-to-byo.asciidoc index e1a3437a4a..d697d20972 100644 --- a/docs/AI-for-security/connect-to-byo.asciidoc +++ b/docs/AI-for-security/connect-to-byo.asciidoc @@ -10,7 +10,7 @@ This page provides instructions for setting up a connector to a large language m This example uses a single server hosted in GCP to run the following components: -* LM Studio with the https://mistral.ai/technology/#models[Mixtral-8x7b] model +* LM Studio with the https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407[Mistral-Nemo-Instruct-2407] model * A reverse proxy using Nginx to authenticate to Elastic Cloud image::images/lms-studio-arch-diagram.png[Architecture diagram for this guide] @@ -20,7 +20,7 @@ NOTE: For testing, you can use alternatives to Nginx such as https://learn.micro [discrete] == Configure your reverse proxy -NOTE: If your Elastic instance is on the same host as LM Studio, you can skip this step. +NOTE: If your Elastic instance is on the same host as LM Studio, you can skip this step. Also, check out our https://www.elastic.co/blog/herding-llama-3-1-with-elastic-and-lm-studio[blog post] that walks through the whole process of setting up a single-host implementation. You need to set up a reverse proxy to enable communication between LM Studio and Elastic. For more complete instructions, refer to a guide such as https://www.digitalocean.com/community/tutorials/how-to-configure-nginx-as-a-reverse-proxy-on-ubuntu-22-04[this one]. 
@@ -74,7 +74,14 @@ server { } -------------------------------------------------- -IMPORTANT: If using the example configuration file above, you must replace several values: Replace `` with your actual token, and keep it safe since you'll need it to set up the {elastic-sec} connector. Replace `` with your actual domain name. Update the `proxy_pass` value at the bottom of the configuration if you decide to change the port number in LM Studio to something other than 1234. +[IMPORTANT] +==== +If using the example configuration file above, you must replace several values: + +* Replace `` with your actual token, and keep it safe since you'll need it to set up the {elastic-sec} connector. +* Replace `` with your actual domain name. +* Update the `proxy_pass` value at the bottom of the configuration if you decide to change the port number in LM Studio to something other than 1234. +==== [discrete] === (Optional) Set up performance monitoring for your reverse proxy @@ -85,23 +92,20 @@ You can use Elastic's {integrations-docs}/nginx[Nginx integration] to monitor pe First, install https://lmstudio.ai/[LM Studio]. LM Studio supports the OpenAI SDK, which makes it compatible with Elastic's OpenAI connector, allowing you to connect to any model available in the LM Studio marketplace. -One current limitation of LM Studio is that when it is installed on a server, you must launch the application using its GUI before doing so using the CLI. For example, by using Chrome RDP with an https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine[X Window System]. After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. +You must launch the application using its GUI before doing so using the CLI. For example, use Chrome RDP with an https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine[X Window System]. 
After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. Once you've launched LM Studio: 1. Go to LM Studio's Search window. -2. Search for an LLM (for example, `Mixtral-8x7B-instruct`). Your chosen model must include `instruct` in its name in order to work with Elastic. -3. Filter your search for "Compatibility Guess" to optimize results for your hardware. Results will be color coded: - * Green means "Full GPU offload possible", which yields the best results. - * Blue means "Partial GPU offload possible", which may work. - * Red for "Likely too large for this machine", which typically will not work. +2. Search for an LLM (for example, `Mistral-Nemo-Instruct-2407`). Your chosen model must include `instruct` in its name in order to work with Elastic. +3. After you find a model, view download options and select a recommended version (green). For best performance, select one with the thumbs-up icon that indicates good performance on your hardware. 4. Download one or more models. IMPORTANT: For security reasons, before downloading a model, verify that it is from a trusted source. It can be helpful to review community feedback on the model (for example using a site like Hugging Face). image::images/lms-model-select.png[The LM Studio model selection interface] -In this example we used https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF[`TheBloke/Mixtral-8x7B-Instruct-v0.1.Q3_K_M.gguf`]. It has 46.7B total parameters, a 32,000 token context window, and uses GGUF https://huggingface.co/docs/transformers/main/en/quantization/overview[quanitization]. For more information about model names and format information, refer to the following table. +In this example we used https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407[`mistralai/Mistral-Nemo-Instruct-2407`]. 
It has 12B total parameters, a 128,000 token context window, and uses GGUF https://huggingface.co/docs/transformers/main/en/quantization/overview[quanitization]. For more information about model names and format information, refer to the following table. [cols="1,1,1,1", options="header"] |=== @@ -124,18 +128,18 @@ After downloading a model, load it in LM Studio using the GUI or LM Studio's htt [discrete] === Option 1: load a model using the CLI (Recommended) -It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI only allows you to import specific paths, but the CLI provides a good interface for loading and unloading. +It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI allows you to use `lms get` to search for models. The CLI provides a good interface for loading and unloading. -Use the following commands in your CLI: +Once you've downloaded a model, use the following commands in your CLI: 1. Verify LM Studio is installed: `lms` 2. Check LM Studio's status: `lms status` 3. List all downloaded models: `lms ls` -4. Load a model: `lms load` +4. Load a model: `lms load`. image::images/lms-cli-welcome.png[The CLI interface during execution of initial LM Studio commands] -After the model loads, you should see a `Model loaded successfully` message in the CLI. +After the model loads, you should see a `Model loaded successfully` message in the CLI. image::images/lms-studio-model-loaded-msg.png[The CLI message that appears after a model loads] @@ -156,8 +160,8 @@ Refer to the following video to see how to load a model using LM Studio's GUI. 
Y diff --git a/docs/AI-for-security/images/lms-cli-welcome.png b/docs/AI-for-security/images/lms-cli-welcome.png index c857d01454..af885b7588 100644 Binary files a/docs/AI-for-security/images/lms-cli-welcome.png and b/docs/AI-for-security/images/lms-cli-welcome.png differ diff --git a/docs/AI-for-security/images/lms-model-select.png b/docs/AI-for-security/images/lms-model-select.png index 454fa2a1ab..38f6168d91 100644 Binary files a/docs/AI-for-security/images/lms-model-select.png and b/docs/AI-for-security/images/lms-model-select.png differ diff --git a/docs/AI-for-security/images/lms-ps-command.png b/docs/AI-for-security/images/lms-ps-command.png index af72b6976c..570acd78f2 100644 Binary files a/docs/AI-for-security/images/lms-ps-command.png and b/docs/AI-for-security/images/lms-ps-command.png differ diff --git a/docs/AI-for-security/images/lms-studio-model-loaded-msg.png b/docs/AI-for-security/images/lms-studio-model-loaded-msg.png index c2e3ec8114..cdd01e994f 100644 Binary files a/docs/AI-for-security/images/lms-studio-model-loaded-msg.png and b/docs/AI-for-security/images/lms-studio-model-loaded-msg.png differ diff --git a/docs/AI-for-security/llm-performance-matrix.asciidoc b/docs/AI-for-security/llm-performance-matrix.asciidoc index c8f9e845c3..abed9dfdb2 100644 --- a/docs/AI-for-security/llm-performance-matrix.asciidoc +++ b/docs/AI-for-security/llm-performance-matrix.asciidoc @@ -13,4 +13,5 @@ This table describes the performance of various large language models (LLMs) for | *Assistant - Knowledge retrieval* | Good | Excellent | Excellent | Excellent | Excellent | Excellent | Great | Excellent | Excellent | *Attack Discovery* | Great | Great | Excellent | Poor | Poor | Great | Poor | Excellent | Poor |=== - \ No newline at end of file + +NOTE: `Excellent` is the best rating, followed by `Great`, then by `Good`, and finally by `Poor`. 
\ No newline at end of file diff --git a/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc b/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc index 8b9be7a266..52bc6403b2 100644 --- a/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc +++ b/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc @@ -37,6 +37,8 @@ NOTE: Entities without any alerts, or with only `Closed` alerts, are not assigne == How is risk score calculated? . The risk scoring engine runs hourly to aggregate `Open` and `Acknowledged` alerts from the last 30 days. For each entity, the engine processes up to 10,000 alerts. ++ +NOTE: When <>, you can choose to also include `Closed` alerts in risk scoring calculations. . The engine groups alerts by `host.name` or `user.name`, and aggregates the individual alert risk scores (`kibana.alert.risk_score`) such that alerts with higher risk scores contribute more than alerts with lower risk scores. The resulting aggregated risk score is assigned to the **Alerts** category in the entity's <>. 
diff --git a/docs/advanced-entity-analytics/images/preview-risky-entities.png b/docs/advanced-entity-analytics/images/preview-risky-entities.png index 838ee1a7ff..ce345d40e4 100644 Binary files a/docs/advanced-entity-analytics/images/preview-risky-entities.png and b/docs/advanced-entity-analytics/images/preview-risky-entities.png differ diff --git a/docs/advanced-entity-analytics/images/turn-on-risk-engine.png b/docs/advanced-entity-analytics/images/turn-on-risk-engine.png index 7593e7df10..4bc05a67e0 100644 Binary files a/docs/advanced-entity-analytics/images/turn-on-risk-engine.png and b/docs/advanced-entity-analytics/images/turn-on-risk-engine.png differ diff --git a/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc b/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc index 945b2f859c..94fe36a8ef 100644 --- a/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc +++ b/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc @@ -29,7 +29,9 @@ image::images/preview-risky-entities.png[Preview of risky entities] If you're installing the risk scoring engine for the first time: . Find **Entity Risk Score** in the navigation menu. -. Turn the **Entity risk score** toggle on. +. On the **Entity Risk Score** page, turn the toggle on. + +You can also choose to include `Closed` alerts in risk scoring calculations and specify a date and time range for the calculation. [role="screenshot"] image::images/turn-on-risk-engine.png[Turn on entity risk scoring] diff --git a/docs/cases/cases-req.asciidoc b/docs/cases/cases-req.asciidoc index 17f12a740e..ec34695c74 100644 --- a/docs/cases/cases-req.asciidoc +++ b/docs/cases/cases-req.asciidoc @@ -54,7 +54,7 @@ once, which creates a user profile. 
| Give view-only access for cases a| **Read** for the *Security* feature and **All** for the *Cases* feature -NOTE: You can customize the sub-feature privileges to allow access to deleting cases, deleting alerts and comments from cases, and viewing or editing case settings. +NOTE: You can customize the sub-feature privileges to allow access to deleting cases, deleting alerts and comments from cases, viewing or editing case settings, adding case comments and attachments, and re-opening cases. | Revoke all access to cases | **None** for the *Cases* feature under *Security* diff --git a/docs/cloud-native-security/cloud-native-security-index.asciidoc b/docs/cloud-native-security/cloud-native-security-index.asciidoc index 742149aa26..935c121a8b 100644 --- a/docs/cloud-native-security/cloud-native-security-index.asciidoc +++ b/docs/cloud-native-security/cloud-native-security-index.asciidoc @@ -41,6 +41,7 @@ include::cspm.asciidoc[leveloffset=+1] include::cspm-get-started-aws.asciidoc[leveloffset=+2] include::cspm-get-started-gcp.asciidoc[leveloffset=+2] include::cspm-get-started-azure.asciidoc[leveloffset=+2] +include::cspm-permissions.asciidoc[leveloffset=+2] include::cspm-findings.asciidoc[leveloffset=+2] include::cspm-benchmark-rules.asciidoc[leveloffset=+2] include::cspm-cloud-posture-dashboard.asciidoc[leveloffset=+2] diff --git a/docs/cloud-native-security/cspm-get-started-aws.asciidoc b/docs/cloud-native-security/cspm-get-started-aws.asciidoc index 9ac8268747..4bc8f107a7 100644 --- a/docs/cloud-native-security/cspm-get-started-aws.asciidoc +++ b/docs/cloud-native-security/cspm-get-started-aws.asciidoc @@ -10,17 +10,10 @@ This page explains how to get started monitoring the security posture of your cl .Requirements [sidebar] -- +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * The CSPM integration is available to all {ecloud} users. 
On-premise deployments require an https://www.elastic.co/pricing[Enterprise subscription]. * CSPM only works in the `Default` {kib} space. Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported. https://github.com/elastic/kibana/issues/new/choose[Click here to request support]. -* `Read` privileges for the following {es} indices: -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -* The following {kib} privileges: -** Security: `Read` -** Integrations: `Read` -** Saved Objects Management: `Read` -** Fleet: `All` * The user who gives the CSPM integration AWS permissions must be an AWS account `admin`. -- diff --git a/docs/cloud-native-security/cspm-get-started-azure.asciidoc b/docs/cloud-native-security/cspm-get-started-azure.asciidoc index 865ebf02b0..4e78781323 100644 --- a/docs/cloud-native-security/cspm-get-started-azure.asciidoc +++ b/docs/cloud-native-security/cspm-get-started-azure.asciidoc @@ -10,17 +10,10 @@ This page explains how to get started monitoring the security posture of your cl .Requirements [sidebar] -- +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * The CSPM integration is available to all {ecloud} users. On-premise deployments require an https://www.elastic.co/pricing[Enterprise subscription]. * CSPM only works in the `Default` {kib} space. Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported. https://github.com/elastic/kibana/issues/new/choose[Click here to request support]. 
-* `Read` privileges for the following {es} indices: -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -* The following {kib} privileges: -** Security: `Read` -** Integrations: `Read` -** Saved Objects Management: `Read` -** Fleet: `All` * The user who gives the CSPM integration permissions in Azure must be an Azure subscription `admin`. -- diff --git a/docs/cloud-native-security/cspm-get-started-gcp.asciidoc b/docs/cloud-native-security/cspm-get-started-gcp.asciidoc index 30d34c74c0..dc5bfca23b 100644 --- a/docs/cloud-native-security/cspm-get-started-gcp.asciidoc +++ b/docs/cloud-native-security/cspm-get-started-gcp.asciidoc @@ -10,17 +10,10 @@ This page explains how to get started monitoring the security posture of your GC .Requirements [sidebar] -- +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * The CSPM integration is available to all {ecloud} users. On-premise deployments require an https://www.elastic.co/pricing[Enterprise subscription]. * CSPM only works in the `Default` {kib} space. Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported. https://github.com/elastic/kibana/issues/new/choose[Click here to request support]. -* `Read` privileges for the following {es} indices: -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -* The following {kib} privileges: -** Security: `Read` -** Integrations: `Read` -** Saved Objects Management: `Read` -** Fleet: `All` * The user who gives the CSPM integration GCP permissions must be a GCP project `admin`. 
-- diff --git a/docs/cloud-native-security/cspm-permissions.asciidoc b/docs/cloud-native-security/cspm-permissions.asciidoc new file mode 100644 index 0000000000..c79a6fd36c --- /dev/null +++ b/docs/cloud-native-security/cspm-permissions.asciidoc @@ -0,0 +1,61 @@ +[[cspm-required-permissions]] += CSPM privilege requirements + +This page lists required privileges for {elastic-sec}'s CSPM features. There are three access levels: read, write, and manage. Each access level and its requirements are described below. + +[discrete] +== Read + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard. + +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: Read` + + +[discrete] +== Write + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard, create detection rules from the findings details flyout, and enable or disable benchmark rules. + +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: All` + + +[discrete] +== Manage + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard, create detection rules from the findings details flyout, enable or disable benchmark rules, and install, update, or uninstall CSPM integrations and assets. 
+ +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: All` +* `Spaces: All` +* `Fleet: All` +* `Integrations: All` + diff --git a/docs/cloud-native-security/environment-variable-capture.asciidoc b/docs/cloud-native-security/environment-variable-capture.asciidoc index ec05a561b8..36ecbd0f89 100644 --- a/docs/cloud-native-security/environment-variable-capture.asciidoc +++ b/docs/cloud-native-security/environment-variable-capture.asciidoc @@ -28,9 +28,6 @@ To set up environment variable capture for an {agent} policy: . Enter the names of env vars you want to capture, separated by commas. For example: `PATH,USER` . Click *Save*. -[role="screenshot"] -image::images/env-var-capture.png[The "linux.advanced.capture_env_vars" advanced agent policy setting] - [[find-cap-env-vars]] [discrete] == Find captured environment variables diff --git a/docs/cloud-native-security/images/env-var-capture.png b/docs/cloud-native-security/images/env-var-capture.png deleted file mode 100644 index d62ca4149c..0000000000 Binary files a/docs/cloud-native-security/images/env-var-capture.png and /dev/null differ diff --git a/docs/detections/detection-engine-intro.asciidoc b/docs/detections/detection-engine-intro.asciidoc index b522d3f4bd..105e2ade3e 100644 --- a/docs/detections/detection-engine-intro.asciidoc +++ b/docs/detections/detection-engine-intro.asciidoc @@ -167,3 +167,9 @@ and you should contact your {kib} administrator. NOTE: For *self-managed* {stack} deployments only, this message may be displayed when the <> setting is not enabled in the `elasticsearch.yml` file. For more information, refer to <>. + +[discrete] +[[detections-logsdb-index-mode]] +== Using logsdb index mode + +To learn how your rules and alerts are affected by using the {ref}/logs-data-stream.html[logsdb index mode], refer to <>. 
\ No newline at end of file diff --git a/docs/detections/detections-index.asciidoc b/docs/detections/detections-index.asciidoc index 2d1cab74d6..ff45aa827b 100644 --- a/docs/detections/detections-index.asciidoc +++ b/docs/detections/detections-index.asciidoc @@ -2,6 +2,8 @@ include::detection-engine-intro.asciidoc[] include::detections-req.asciidoc[leveloffset=+1] +include::detections-logsdb-impact.asciidoc[leveloffset=+1] + include::about-rules.asciidoc[] diff --git a/docs/detections/detections-logsdb-impact.asciidoc b/docs/detections/detections-logsdb-impact.asciidoc new file mode 100644 index 0000000000..a245644800 --- /dev/null +++ b/docs/detections/detections-logsdb-impact.asciidoc @@ -0,0 +1,67 @@ +[[detections-logsdb-index-mode-impact]] += Using logsdb index mode with {elastic-sec} + +NOTE: To use the {ref}/mapping-source-field.html#synthetic-source[synthetic `_source`] feature, you must have the appropriate subscription. Refer to the subscription page for https://www.elastic.co/subscriptions/cloud[Elastic Cloud] and {subscriptions}[Elastic Stack/self-managed] for the breakdown of available features and their associated subscription tiers. + +This topic explains the impact of using logsdb index mode with {elastic-sec}. + +With logsdb index mode, the original `_source` field is not stored in the index but can be reconstructed using {ref}/mapping-source-field.html#synthetic-source[synthetic `_source`]. + +When the `_source` is reconstructed, {ref}/mapping-source-field.html#synthetic-source-modifications[modifications] are possible. Therefore, there could be a mismatch between users' expectations and how fields are formatted. + +Continue reading to find out how this affects specific {elastic-sec} components. + +NOTE: Logsdb is not recommended for {elastic-sec} at this time. 
Users must fully understand and accept the documented changes to detection alert documents (see below), and ensure their deployment has excess hot data tier CPU resource capacity before enabling logsdb mode, as logsdb mode requires additional CPU resources during the ingest/indexing process. Enabling logsdb without sufficient hot data tier CPU may result in data ingestion backups and/or security detection rule timeouts and errors. + +[discrete] +[[logsdb-alerts]] +== Alerts + +When alerts are generated, the `_source` event is copied into the alert to retain the original data. When the logsdb index mode is applied, the `_source` event stored in the alert is reconstructed using synthetic `_source`. + +If you're switching to use logsdb index mode, the `_source` field stored in the alert might look different in certain situations: + +* {ref}/mapping-source-field.html#synthetic-source-modifications-leaf-arrays[Arrays can be reconstructed differently or deduplicated] +* {ref}/mapping-source-field.html#synthetic-source-modifications-field-names[Field names] +* `geo_point` data fields (refer to {ref}/mapping-source-field.html#synthetic-source-modifications-ranges[Representation of ranges] and {ref}/mapping-source-field.html#synthetic-source-precision-loss-for-point-types[Reduced precision of `geo_point` values] for more information) + +Alerts generated by the following rule types could be affected: + +* Custom query +* Event correlation (non-sequence only) +* Non-aggregate rule types (for example, {esql} rules that use non-aggregating queries) + +Alerts that are generated by threshold, {ml}, and event correlation sequence rules are not affected since they do not contain copies of the original source. + +[discrete] +[[logsdb-rule-actions]] +== Rule actions + +While we do not recommend using `_source` for actions, in cases where the action relies on the `_source`, the same limitations and changes apply. 
+ +If you send alert notifications by enabling {kibana-ref}/alerting-getting-started.html#alerting-concepts-actions[actions] to the external systems that have workflows or automations based on fields formatted from the original source, they may be affected. In particular, this can happen when the fields used are arrays of objects. + +We recommend checking and adjusting the rule actions using `_source` before switching to logsdb index mode. + +[discrete] +[[logsdb-runtime-fields]] +== Runtime fields + +Runtime fields that reference `_source` may be affected. Some runtime fields might not work and need to be adjusted. For example, if an event was indexed with the value of `agent.name` in the dot-notation form, it will be returned in the nested form and might not work. + +The following is an example of accessing `_source` that works with the logsdb index mode enabled: + +[source,console] +---- +"source": """ emit(params._source.agent.name + "_____" + doc['agent.name'].value ); """ +"source": """ emit(params._source['agent']['name'] + "_____" + doc['agent.name'].value ); """ +"source": """ emit(field('agent.name').get(null) + "_____" + doc['agent.name'].value ); """ +"source": """ emit($('agent.name', null) + "_____" + doc['agent.name'].value ); """ +---- + +The following will not work with synthetic source (logsdb index mode enabled): + +[source,console] +---- +"source": """ emit(params._source['agent.name'] + "_____" + doc['agent.name'].value ); """ +---- diff --git a/docs/getting-started/agent-tamper-protection.asciidoc b/docs/getting-started/agent-tamper-protection.asciidoc index 5728169a87..423c5c4cae 100644 --- a/docs/getting-started/agent-tamper-protection.asciidoc +++ b/docs/getting-started/agent-tamper-protection.asciidoc @@ -48,3 +48,5 @@ If you need the uninstall token to remove {agent} from an endpoint, you can find ** Click the *Show token* icon in the *Token* column to reveal a specific token. 
** Click the *View uninstall command* icon in the *Actions* column to open the *Uninstall agent* flyout, containing the full uninstall command with the token. + +TIP: If you have many tamper-protected {agent} policies, you may want to <> in a single command. \ No newline at end of file diff --git a/docs/getting-started/uninstall-agent.asciidoc b/docs/getting-started/uninstall-agent.asciidoc index 25f940d8e2..1b00a45d00 100644 --- a/docs/getting-started/uninstall-agent.asciidoc +++ b/docs/getting-started/uninstall-agent.asciidoc @@ -5,7 +5,6 @@ To uninstall {agent} from a host, run the `uninstall` command from the directory If <> is enabled on the Agent policy for the host, you'll need to include the uninstall token in the command, using the `--uninstall-token` flag. You can <> on the Agent policy. Alternatively, find *{fleet}* in the navigation menu or by using the {kibana-ref}/introduction.html#kibana-navigation-search[global search field], and select *Uninstall tokens*. - For example, to uninstall {agent} on a macOS or Linux host: [source,shell] @@ -13,6 +12,36 @@ For example, to uninstall {agent} on a macOS or Linux host: sudo elastic-agent uninstall --uninstall-token 12345678901234567890123456789012 ---------------------------------- +[discrete] +[[multiple-uninstall-tokens]] +== Provide multiple uninstall tokens + +If you have multiple tamper-protected {agent} policies, you may want to provide multiple uninstall tokens in a single command. There are two ways to do this: + +* The `--uninstall-token` command can receive multiple uninstall tokens separated by a comma, without spaces. ++ +[source,shell] +---------------------------------- +sudo elastic-agent uninstall -f --uninstall-token 7b3d364db8e0deb1cda696ae85e42644,a7336b71e243e7c92d9504b04a774266 +---------------------------------- + +* `--uninstall-token`'s argument can also be a path to a text file with one uninstall token per line. 
++ +NOTE: You must use the full file path, otherwise the file may not be found. ++ +[source,shell] +---------------------------------- +sudo elastic-agent uninstall -f --uninstall-token /tmp/tokens.txt +---------------------------------- ++ +In this example, `tokens.txt` would contain: ++ +[source,txt] +---------------------------------- +7b3d364db8e0deb1cda696ae85e42644 +a7336b71e243e7c92d9504b04a774266 +---------------------------------- + [discrete] [[uninstall-endpoint]] == Uninstall {elastic-endpoint} diff --git a/docs/release-notes.asciidoc b/docs/release-notes.asciidoc index 8e84877110..511b9e59bd 100644 --- a/docs/release-notes.asciidoc +++ b/docs/release-notes.asciidoc @@ -1,5 +1,8 @@ [[release-notes]] -[chapter] = Release notes This section summarizes the changes in each release. + +* <> + +include::release-notes/9.0.asciidoc[] \ No newline at end of file diff --git a/docs/release-notes/9.0.asciidoc b/docs/release-notes/9.0.asciidoc new file mode 100644 index 0000000000..684005c91a --- /dev/null +++ b/docs/release-notes/9.0.asciidoc @@ -0,0 +1,32 @@ +[[release-notes-header-9.0.0]] +== 9.0 + +coming::[9.0.0] + +[discrete] +[[release-notes-9.0.0]] +=== 9.0.0 + +[discrete] +[[known-issue-9.0.0]] +==== Known issues + +[discrete] +[[breaking-changes-9.0.0]] +==== Breaking changes + +[discrete] +[[deprecations-9.0.0]] +==== Deprecations + +[discrete] +[[features-9.0.0]] +==== New features + +[discrete] +[[enhancements-9.0.0]] +==== Enhancements + +[discrete] +[[bug-fixes-9.0.0]] +==== Bug fixes diff --git a/docs/serverless/AI-for-security/connect-to-byo-llm.asciidoc b/docs/serverless/AI-for-security/connect-to-byo-llm.asciidoc index 6f5d6fbb3d..09a2dd88d5 100644 --- a/docs/serverless/AI-for-security/connect-to-byo-llm.asciidoc +++ b/docs/serverless/AI-for-security/connect-to-byo-llm.asciidoc @@ -1,39 +1,33 @@ -[[security-connect-to-byo-llm]] +[[connect-to-byo-llm]] = Connect to your own local LLM -// :description: Set up a connector to LM Studio so you can use 
a local model with AI Assistant. -// :keywords: security, overview, get-started +:frontmatter-description: Set up a connector to LM Studio so you can use a local model with AI Assistant. +:frontmatter-tags-products: [security] +:frontmatter-tags-content-type: [guide] +:frontmatter-tags-user-goals: [get-started] -This page provides instructions for setting up a connector to a large language model (LLM) of your choice using LM Studio. This allows you to use your chosen model within {elastic-sec}. You'll first need to set up a reverse proxy to communicate with {elastic-sec}, then set up LM Studio on a server, and finally configure the connector in your {elastic-sec} project. https://www.elastic.co/blog/ai-assistant-locally-hosted-models[Learn more about the benefits of using a local LLM]. +This page provides instructions for setting up a connector to a large language model (LLM) of your choice using LM Studio. This allows you to use your chosen model within {elastic-sec}. You'll first need to set up a reverse proxy to communicate with {elastic-sec}, then set up LM Studio on a server, and finally configure the connector in your Elastic deployment. https://www.elastic.co/blog/ai-assistant-locally-hosted-models[Learn more about the benefits of using a local LLM]. 
This example uses a single server hosted in GCP to run the following components: -* LM Studio with the https://mistral.ai/technology/#models[Mixtral-8x7b] model +* LM Studio with the https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407[Mistral-Nemo-Instruct-2407] model * A reverse proxy using Nginx to authenticate to Elastic Cloud -[role="screenshot"] image::images/lms-studio-arch-diagram.png[Architecture diagram for this guide] -[NOTE] -==== -For testing, you can use alternatives to Nginx such as https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/overview[Azure Dev Tunnels] or https://ngrok.com/[Ngrok], but using Nginx makes it easy to collect additional telemetry and monitor its status by using Elastic's native Nginx integration. While this example uses cloud infrastructure, it could also be replicated locally without an internet connection. -==== +NOTE: For testing, you can use alternatives to Nginx such as https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/overview[Azure Dev Tunnels] or https://ngrok.com/[Ngrok], but using Nginx makes it easy to collect additional telemetry and monitor its status by using Elastic's native Nginx integration. While this example uses cloud infrastructure, it could also be replicated locally without an internet connection. [discrete] -[[security-connect-to-byo-llm-configure-your-reverse-proxy]] == Configure your reverse proxy -[NOTE] -==== -If your Elastic instance is on the same host as LM Studio, you can skip this step. -==== +NOTE: If your Elastic instance is on the same host as LM Studio, you can skip this step. Also, check out our https://www.elastic.co/blog/herding-llama-3-1-with-elastic-and-lm-studio[blog post] that walks through the whole process of setting up a single-host implementation. You need to set up a reverse proxy to enable communication between LM Studio and Elastic. 
For more complete instructions, refer to a guide such as https://www.digitalocean.com/community/tutorials/how-to-configure-nginx-as-a-reverse-proxy-on-ubuntu-22-04[this one]. The following is an example Nginx configuration file: [source,txt] ----- +-------------------------------------------------- server { listen 80; listen [::]:80; @@ -69,67 +63,57 @@ server { ssl_trusted_certificate /etc/letsencrypt/live//fullchain.pem; resolver 1.1.1.1; location / { - + if ($http_authorization != "Bearer ") { return 401; -} +} proxy_pass http://localhost:1234/; } } ----- +-------------------------------------------------- [IMPORTANT] ==== +If using the example configuration file above, you must replace several values: + * Replace `` with your actual token, and keep it safe since you'll need it to set up the {elastic-sec} connector. * Replace `` with your actual domain name. * Update the `proxy_pass` value at the bottom of the configuration if you decide to change the port number in LM Studio to something other than 1234. ==== [discrete] -[[security-connect-to-byo-llm-optional-set-up-performance-monitoring-for-your-reverse-proxy]] === (Optional) Set up performance monitoring for your reverse proxy - -You can use Elastic's https://www.elastic.co/docs/current/integrations/nginx[Nginx integration] to monitor performance and populate monitoring dashboards in the {security-app}. +You can use Elastic's {integrations-docs}/nginx[Nginx integration] to monitor performance and populate monitoring dashboards in the {security-app}. [discrete] -[[security-connect-to-byo-llm-configure-lm-studio-and-download-a-model]] == Configure LM Studio and download a model First, install https://lmstudio.ai/[LM Studio]. LM Studio supports the OpenAI SDK, which makes it compatible with Elastic's OpenAI connector, allowing you to connect to any model available in the LM Studio marketplace. 
-One current limitation of LM Studio is that when it is installed on a server, you must launch the application using its GUI before doing so using the CLI. For example, by using Chrome RDP with an https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine[X Window System]. After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. +You must launch the application using its GUI before doing so using the CLI. For example, use Chrome RDP with an https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine[X Window System]. After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. -Once you've launched LM Studio: +Once you've launched LM Studio: -. Go to LM Studio's Search window. -. Search for an LLM (for example, `Mixtral-8x7B-instruct`). Your chosen model must include `instruct` in its name in order to work with Elastic. -. Filter your search for "Compatibility Guess" to optimize results for your hardware. Results will be color coded: -+ -** Green means "Full GPU offload possible", which yields the best results. -** Blue means "Partial GPU offload possible", which may work. -** Red for "Likely too large for this machine", which typically will not work. -. Download one or more models. +1. Go to LM Studio's Search window. +2. Search for an LLM (for example, `Mistral-Nemo-Instruct-2407`). Your chosen model must include `instruct` in its name in order to work with Elastic. +3. After you find a model, view download options and select a recommended version (green). For best performance, select one with the thumbs-up icon that indicates good performance on your hardware. +4. Download one or more models. -[IMPORTANT] -==== -For security reasons, before downloading a model, verify that it is from a trusted source. 
It can be helpful to review community feedback on the model (for example using a site like Hugging Face). -==== +IMPORTANT: For security reasons, before downloading a model, verify that it is from a trusted source. It can be helpful to review community feedback on the model (for example using a site like Hugging Face). -[role="screenshot"] image::images/lms-model-select.png[The LM Studio model selection interface] -In this example we used https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF[`TheBloke/Mixtral-8x7B-Instruct-v0.1.Q3_K_M.gguf`]. It has 46.7B total parameters, a 32,000 token context window, and uses GGUF https://huggingface.co/docs/transformers/main/en/quantization/overview[quanitization]. For more information about model names and format information, refer to the following table. +In this example we used https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407[`mistralai/Mistral-Nemo-Instruct-2407`]. It has 12B total parameters, a 128,000 token context window, and uses GGUF https://huggingface.co/docs/transformers/main/en/quantization/overview[quantization]. For more information about model names and format information, refer to the following table. +[cols="1,1,1,1", options="header"] |=== -| Model Name| Parameter Size| Tokens/Context Window| Quantization Format - +| Model Name | Parameter Size | Tokens/Context Window | Quantization Format | Name of model, sometimes with a version number. | LLMs are often compared by their number of parameters — higher numbers mean more powerful models. | Tokens are small chunks of input information. Tokens do not necessarily correspond to characters. You can use https://platform.openai.com/tokenizer[Tokenizer] to see how many tokens a given prompt might contain. | Quantization reduces overall parameters and helps the model to run faster, but reduces accuracy. - | Examples: Llama, Mistral, Phi-3, Falcon. | The number of parameters is a measure of the size and the complexity of the model. 
The more parameters a model has, the more data it can process, learn from, generate, and predict. | The context window defines how much information the model can process at once. If the number of input tokens exceeds this limit, input gets truncated. @@ -137,87 +121,77 @@ In this example we used https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0 |=== [discrete] -[[security-connect-to-byo-llm-load-a-model-in-lm-studio]] -== Load a model in LM Studio +== Load a model in LM Studio After downloading a model, load it in LM Studio using the GUI or LM Studio's https://lmstudio.ai/blog/lms[CLI tool]. [discrete] -[[security-connect-to-byo-llm-option-1-load-a-model-using-the-cli-recommended]] === Option 1: load a model using the CLI (Recommended) -It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI only allows you to import specific paths, but the CLI provides a good interface for loading and unloading. +It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI allows you to use `lms get` to search for models. The CLI provides a good interface for loading and unloading. -Use the following commands in your CLI: +Once you've downloaded a model, use the following commands in your CLI: -. Verify LM Studio is installed: `lms` -. Check LM Studio's status: `lms status` -. List all downloaded models: `lms ls` -. Load a model: `lms load` +1. Verify LM Studio is installed: `lms` +2. Check LM Studio's status: `lms status` +3. List all downloaded models: `lms ls` +4. Load a model: `lms load`. -[role="screenshot"] image::images/lms-cli-welcome.png[The CLI interface during execution of initial LM Studio commands] -After the model loads, you should see a `Model loaded successfully` message in the CLI. 
+After the model loads, you should see a `Model loaded successfully` message in the CLI. -[role="screenshot"] image::images/lms-studio-model-loaded-msg.png[The CLI message that appears after a model loads] To verify which model is loaded, use the `lms ps` command. -[role="screenshot"] image::images/lms-ps-command.png[The CLI message that appears after running lms ps] If your model uses NVIDIA drivers, you can check the GPU performance with the `sudo nvidia-smi` command. [discrete] -[[security-connect-to-byo-llm-option-2-load-a-model-using-the-gui]] === Option 2: load a model using the GUI Refer to the following video to see how to load a model using LM Studio's GUI. You can change the **port** setting, which is referenced in the Nginx configuration file. Note that the **GPU offload** was set to **Max**. +======= ++++ - + +
++++ +======= [discrete] -[[security-connect-to-byo-llm-optional-collect-logs-using-elastics-custom-logs-integration]] == (Optional) Collect logs using Elastic's Custom Logs integration -You can monitor the performance of the host running LM Studio using Elastic's https://www.elastic.co/docs/current/integrations/log[Custom Logs integration]. This can also help with troubleshooting. Note that the default path for LM Studio logs is `/tmp/lmstudio-server-log.txt`, as in the following screenshot: +You can monitor the performance of the host running LM Studio using Elastic's {integrations-docs}/log[Custom Logs integration]. This can also help with troubleshooting. Note that the default path for LM Studio logs is `/tmp/lmstudio-server-log.txt`, as in the following screenshot: -[role="screenshot"] image::images/lms-custom-logs-config.png[The configuration window for the custom logs integration] [discrete] -[[security-connect-to-byo-llm-configure-the-connector-in-elastic-sec]] -== Configure the connector in {elastic-sec} +== Configure the connector in your Elastic deployment -Finally, configure the connector in your Security project: +Finally, configure the connector: -. Log in to your Security project. -. Find **Connectors** in the navigation menu or use the global search field. Then click **Create Connector**, and select **OpenAI**. The OpenAI connector enables this use case because LM Studio uses the OpenAI SDK. -. Name your connector to help keep track of the model version you are using. -. Under **Select an OpenAI provider**, select **Other (OpenAI Compatible Service)**. -. Under **URL**, enter the domain name specified in your Nginx configuration file, followed by `/v1/chat/completions`. -. Under **Default model**, enter `local-model`. -. Under **API key**, enter the secret token specified in your Nginx configuration file. -. Click **Save**. +1. Log in to your Elastic deployment. +2. 
Find the **Connectors** page in the navigation menu or use the {kibana-ref}/introduction.html#kibana-navigation-search[global search field]. Then click **Create Connector**, and select **OpenAI**. The OpenAI connector enables this use case because LM Studio uses the OpenAI SDK. +3. Name your connector to help keep track of the model version you are using. +4. Under **Select an OpenAI provider**, select **Other (OpenAI Compatible Service)**. +5. Under **URL**, enter the domain name specified in your Nginx configuration file, followed by `/v1/chat/completions`. +6. Under **Default model**, enter `local-model`. +7. Under **API key**, enter the secret token specified in your Nginx configuration file. +8. Click **Save**. -[role="screenshot"] image::images/lms-edit-connector.png[The Edit connector page in the {security-app}, with appropriate values populated] -Setup is now complete. You can use the model you've loaded in LM Studio to power Elastic's generative AI features. You can test a variety of models as you interact with AI Assistant to see what works best without having to update your connector. +Setup is now complete. You can use the model you've loaded in LM Studio to power Elastic's generative AI features. You can test a variety of models as you interact with AI Assistant to see what works best without having to update your connector. -[NOTE] -==== -While local models work well for <>, we recommend you use one of <> for interacting with <>. As local models become more performant over time, this is likely to change. -==== +NOTE: While local models work well for <>, we recommend you use one of <> for interacting with <>. As local models become more performant over time, this is likely to change. 
diff --git a/docs/serverless/AI-for-security/images/lms-cli-welcome.png b/docs/serverless/AI-for-security/images/lms-cli-welcome.png index c857d01454..af885b7588 100644 Binary files a/docs/serverless/AI-for-security/images/lms-cli-welcome.png and b/docs/serverless/AI-for-security/images/lms-cli-welcome.png differ diff --git a/docs/serverless/AI-for-security/images/lms-model-select.png b/docs/serverless/AI-for-security/images/lms-model-select.png index 454fa2a1ab..38f6168d91 100644 Binary files a/docs/serverless/AI-for-security/images/lms-model-select.png and b/docs/serverless/AI-for-security/images/lms-model-select.png differ diff --git a/docs/serverless/AI-for-security/images/lms-ps-command.png b/docs/serverless/AI-for-security/images/lms-ps-command.png index af72b6976c..570acd78f2 100644 Binary files a/docs/serverless/AI-for-security/images/lms-ps-command.png and b/docs/serverless/AI-for-security/images/lms-ps-command.png differ diff --git a/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png b/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png index c2e3ec8114..cdd01e994f 100644 Binary files a/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png and b/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png differ diff --git a/docs/serverless/AI-for-security/llm-connector-guides.asciidoc b/docs/serverless/AI-for-security/llm-connector-guides.asciidoc index b77480d407..c238871c41 100644 --- a/docs/serverless/AI-for-security/llm-connector-guides.asciidoc +++ b/docs/serverless/AI-for-security/llm-connector-guides.asciidoc @@ -12,4 +12,4 @@ Setup guides are available for the following LLM providers: * <> * <> * <> -* <> +* <> diff --git a/docs/serverless/AI-for-security/llm-performance-matrix.asciidoc b/docs/serverless/AI-for-security/llm-performance-matrix.asciidoc index 193ea061ef..95a428dfe1 100644 --- a/docs/serverless/AI-for-security/llm-performance-matrix.asciidoc +++ 
b/docs/serverless/AI-for-security/llm-performance-matrix.asciidoc @@ -17,4 +17,5 @@ This table describes the performance of various large language models (LLMs) for | *Assistant - Knowledge retrieval* | Good | Excellent | Excellent | Excellent | Excellent | Excellent | Great | Excellent | Excellent | *Attack Discovery* | Great | Great | Excellent | Poor | Poor | Great | Poor | Excellent | Poor |=== - \ No newline at end of file + +NOTE: `Excellent` is the best rating, followed by `Great`, then by `Good`, and finally by `Poor`. \ No newline at end of file diff --git a/docs/serverless/advanced-entity-analytics/entity-risk-scoring.asciidoc b/docs/serverless/advanced-entity-analytics/entity-risk-scoring.asciidoc index 7c17c3dee1..f1903a9d9c 100644 --- a/docs/serverless/advanced-entity-analytics/entity-risk-scoring.asciidoc +++ b/docs/serverless/advanced-entity-analytics/entity-risk-scoring.asciidoc @@ -39,6 +39,8 @@ Entities without any alerts, or with only `Closed` alerts, are not assigned a ri == How is risk score calculated? . The risk scoring engine runs hourly to aggregate `Open` and `Acknowledged` alerts from the last 30 days. For each entity, the engine processes up to 10,000 alerts. ++ +NOTE: When <>, you can choose to also include `Closed` alerts in risk scoring calculations. . The engine groups alerts by `host.name` or `user.name`, and aggregates the individual alert risk scores (`kibana.alert.risk_score`) such that alerts with higher risk scores contribute more than alerts with lower risk scores. The resulting aggregated risk score is assigned to the **Alerts** category in the entity's <>. . The engine then verifies the entity's <>. If there is no asset criticality assigned, the entity risk score remains equal to the aggregated score from the **Alerts** category. If a criticality level is assigned, the engine updates the risk score based on the default risk weight for each criticality level. 
The asset criticality risk input is assigned to the **Asset Criticality** category in the entity's risk summary. + diff --git a/docs/serverless/advanced-entity-analytics/turn-on-risk-engine.asciidoc b/docs/serverless/advanced-entity-analytics/turn-on-risk-engine.asciidoc index 2462493115..a2f041d7f9 100644 --- a/docs/serverless/advanced-entity-analytics/turn-on-risk-engine.asciidoc +++ b/docs/serverless/advanced-entity-analytics/turn-on-risk-engine.asciidoc @@ -43,7 +43,9 @@ To view risk score data, you must have alerts generated in your environment. If you're installing the risk scoring engine for the first time: . Go to **Project settings** → **Management** → **Entity Risk Score**. -. Turn the **Entity risk score** toggle on. +. On the **Entity Risk Score** page, turn the toggle on. + +You can also choose to include `Closed` alerts in risk scoring calculations and specify a date and time range for the calculation. [role="screenshot"] image::images/turn-on-risk-engine/turn-on-risk-engine.png[Turn on entity risk scoring] diff --git a/docs/serverless/alerts/visual-event-analyzer.asciidoc b/docs/serverless/alerts/visual-event-analyzer.asciidoc index 631a2eefb4..7f1311c44e 100644 --- a/docs/serverless/alerts/visual-event-analyzer.asciidoc +++ b/docs/serverless/alerts/visual-event-analyzer.asciidoc @@ -7,11 +7,6 @@ {elastic-sec} allows any event detected by {elastic-endpoint} to be analyzed using a process-based visual analyzer, which shows a graphical timeline of processes that led up to the alert and the events that occurred immediately after. Examining events in the visual event analyzer is useful to determine the origin of potentially malicious activity and other areas in your environment that may be compromised. It also enables security analysts to drill down into all related hosts, processes, and other events to aid in their investigations. -[TIP] -==== -If you're experiencing performance degradation, you can <> from analyzer queries. 
-==== - [discrete] [[find-events-analyze]] == Find events to analyze diff --git a/docs/serverless/cloud-native-security/cspm-get-started-azure.asciidoc b/docs/serverless/cloud-native-security/cspm-get-started-azure.asciidoc index b04d071412..af4ef43ea4 100644 --- a/docs/serverless/cloud-native-security/cspm-get-started-azure.asciidoc +++ b/docs/serverless/cloud-native-security/cspm-get-started-azure.asciidoc @@ -14,13 +14,9 @@ This page explains how to get started monitoring the security posture of your cl .Requirements [NOTE] ==== +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * CSPM only works in the `Default` {kib} space. Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported (https://github.com/elastic/kibana/issues/new/choose[request support]). -* To view posture data, you need `read` privileges for the following {es} indices: -+ -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -** `logs-cloud_security_posture.findings` * The user who gives the CSPM integration permissions in Azure must be an Azure subscription `admin`. ==== diff --git a/docs/serverless/cloud-native-security/cspm-get-started-gcp.asciidoc b/docs/serverless/cloud-native-security/cspm-get-started-gcp.asciidoc index 2f72852609..766a5ccff0 100644 --- a/docs/serverless/cloud-native-security/cspm-get-started-gcp.asciidoc +++ b/docs/serverless/cloud-native-security/cspm-get-started-gcp.asciidoc @@ -14,13 +14,9 @@ This page explains how to get started monitoring the security posture of your cl .Requirements [NOTE] ==== +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * CSPM only works in the `Default` {kib} space. 
Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported (https://github.com/elastic/kibana/issues/new/choose[request support]). -* To view posture data, you need the appropriate user role to read the following {es} indices: -+ -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -** `Logs-cloud_security_posture.findings` * The user who gives the CSPM integration GCP permissions must be a GCP project `admin`. ==== diff --git a/docs/serverless/cloud-native-security/cspm-get-started.asciidoc b/docs/serverless/cloud-native-security/cspm-get-started.asciidoc index aad18b1033..c586b1964f 100644 --- a/docs/serverless/cloud-native-security/cspm-get-started.asciidoc +++ b/docs/serverless/cloud-native-security/cspm-get-started.asciidoc @@ -14,13 +14,9 @@ This page explains how to get started monitoring the security posture of your cl .Requirements [NOTE] ==== +* Minimum privileges vary depending on whether you need to read, write, or manage CSPM data and integrations. Refer to <>. * CSPM only works in the `Default` {kib} space. Installing the CSPM integration on a different {kib} space will not work. * CSPM is supported only on AWS, GCP, and Azure commercial cloud platforms, and AWS GovCloud. Other government cloud platforms are not supported (https://github.com/elastic/kibana/issues/new/choose[request support]). -* To view posture data, you need the appropriate user role to read the following {es} indices: -+ -** `logs-cloud_security_posture.findings_latest-*` -** `logs-cloud_security_posture.scores-*` -** `Logs-cloud_security_posture.findings` * The user who gives the CSPM integration AWS permissions must be an AWS account `admin`. 
==== diff --git a/docs/serverless/cloud-native-security/cspm-permissions.asciidoc b/docs/serverless/cloud-native-security/cspm-permissions.asciidoc new file mode 100644 index 0000000000..56459ec6b3 --- /dev/null +++ b/docs/serverless/cloud-native-security/cspm-permissions.asciidoc @@ -0,0 +1,61 @@ +[[cspm-required-permissions]] += CSPM privilege requirements + +This page lists required privileges for {elastic-sec}'s CSPM features. There are three access levels: read, write, and manage. Each access level and its requirements are described below. + +[discrete] +== Read + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard. + +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: Read` + + +[discrete] +== Write + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard, create detection rules from the findings details flyout, and enable or disable benchmark rules. + +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: All` + + +[discrete] +== Manage + +Users with these minimum permissions can view data on the **Findings** page and the Cloud Posture dashboard, create detection rules from the findings details flyout, enable or disable benchmark rules, and install, update, or uninstall CSPM integrations and assets. 
+ +[discrete] +=== {es} index privileges +`Read` privileges for the following {es} indices: + +* `logs-cloud_security_posture.findings_latest-*` +* `logs-cloud_security_posture.scores-*` + +[discrete] +=== {kib} privileges + +* `Security: All` +* `Spaces: All` +* `Fleet: All` +* `Integrations: All` + diff --git a/docs/serverless/cloud-native-security/environment-variable-capture.asciidoc b/docs/serverless/cloud-native-security/environment-variable-capture.asciidoc index 2e6e78ab6f..36a79f8763 100644 --- a/docs/serverless/cloud-native-security/environment-variable-capture.asciidoc +++ b/docs/serverless/cloud-native-security/environment-variable-capture.asciidoc @@ -22,9 +22,6 @@ To set up environment variable capture for an {agent} policy: . Enter the names of env vars you want to capture, separated by commas. For example: `PATH,USER` . Click **Save**. -[role="screenshot"] -image::images/environment-variable-capture/-cloud-native-security-env-var-capture.png[The "linux.advanced.capture_env_vars" advanced agent policy setting] - [discrete] [[find-cap-env-vars]] == Find captured environment variables diff --git a/docs/serverless/edr-install-config/agent-tamper-protection.asciidoc b/docs/serverless/edr-install-config/agent-tamper-protection.asciidoc index 56a1342d1e..07031a4696 100644 --- a/docs/serverless/edr-install-config/agent-tamper-protection.asciidoc +++ b/docs/serverless/edr-install-config/agent-tamper-protection.asciidoc @@ -50,3 +50,5 @@ If you need the uninstall token to remove {agent} from an endpoint, you can find + ** Click the **Show token** icon in the **Token** column to reveal a specific token. ** Click the **View uninstall command** icon in the **Actions** column to open the **Uninstall agent** flyout, containing the full uninstall command with the token. + +TIP: If you have many tamper-protected {agent} policies, you may want to <> in a single command. 
\ No newline at end of file diff --git a/docs/serverless/edr-install-config/uninstall-agent.asciidoc b/docs/serverless/edr-install-config/uninstall-agent.asciidoc index f07443259d..a1051813f7 100644 --- a/docs/serverless/edr-install-config/uninstall-agent.asciidoc +++ b/docs/serverless/edr-install-config/uninstall-agent.asciidoc @@ -54,6 +54,36 @@ C:\"Program Files"\Elastic\Agent\elastic-agent.exe uninstall --uninstall-token 1 ++++ +[discrete] +[[multiple-uninstall-tokens]] +== Provide multiple uninstall tokens + +If you have multiple tamper-protected {agent} policies, you may want to provide multiple uninstall tokens in a single command. There are two ways to do this: + +* The `--uninstall-token` command can receive multiple uninstall tokens separated by a comma, without spaces. ++ +[source,shell] +---------------------------------- +sudo elastic-agent uninstall -f --uninstall-token 7b3d364db8e0deb1cda696ae85e42644,a7336b71e243e7c92d9504b04a774266 +---------------------------------- + +* `--uninstall-token`'s argument can also be a path to a text file with one uninstall token per line. ++ +NOTE: You must use the full file path, otherwise the file may not be found. 
++ +[source,shell] +---------------------------------- +sudo elastic-agent uninstall -f --uninstall-token /tmp/tokens.txt +---------------------------------- ++ +In this example, `tokens.txt` would contain: ++ +[source,txt] +---------------------------------- +7b3d364db8e0deb1cda696ae85e42644 +a7336b71e243e7c92d9504b04a774266 +---------------------------------- + [discrete] [[uninstall-endpoint]] == Uninstall {elastic-endpoint} diff --git a/docs/serverless/images/environment-variable-capture/-cloud-native-security-env-var-capture.png b/docs/serverless/images/environment-variable-capture/-cloud-native-security-env-var-capture.png deleted file mode 100644 index d62ca4149c..0000000000 Binary files a/docs/serverless/images/environment-variable-capture/-cloud-native-security-env-var-capture.png and /dev/null differ diff --git a/docs/serverless/images/turn-on-risk-engine/preview-risky-entities.png b/docs/serverless/images/turn-on-risk-engine/preview-risky-entities.png index 838ee1a7ff..ce345d40e4 100644 Binary files a/docs/serverless/images/turn-on-risk-engine/preview-risky-entities.png and b/docs/serverless/images/turn-on-risk-engine/preview-risky-entities.png differ diff --git a/docs/serverless/images/turn-on-risk-engine/turn-on-risk-engine.png b/docs/serverless/images/turn-on-risk-engine/turn-on-risk-engine.png index 7593e7df10..4bc05a67e0 100644 Binary files a/docs/serverless/images/turn-on-risk-engine/turn-on-risk-engine.png and b/docs/serverless/images/turn-on-risk-engine/turn-on-risk-engine.png differ diff --git a/docs/serverless/index.asciidoc b/docs/serverless/index.asciidoc index c6b58e67b0..056e164581 100644 --- a/docs/serverless/index.asciidoc +++ b/docs/serverless/index.asciidoc @@ -88,6 +88,7 @@ include::./cloud-native-security/cspm.asciidoc[leveloffset=+3] include::./cloud-native-security/cspm-get-started.asciidoc[leveloffset=+4] include::./cloud-native-security/cspm-get-started-gcp.asciidoc[leveloffset=+4] 
include::./cloud-native-security/cspm-get-started-azure.asciidoc[leveloffset=+4] +include::./cloud-native-security/cspm-permissions.asciidoc[leveloffset=+4] include::./cloud-native-security/cspm-findings-page.asciidoc[leveloffset=+4] include::./cloud-native-security/benchmark-rules.asciidoc[leveloffset=+4] include::./cloud-native-security/cspm-cloud-posture-dashboard-dash.asciidoc[leveloffset=+4] diff --git a/docs/serverless/investigate/case-permissions.asciidoc b/docs/serverless/investigate/case-permissions.asciidoc index 237a29c724..8ddcd52e96 100644 --- a/docs/serverless/investigate/case-permissions.asciidoc +++ b/docs/serverless/investigate/case-permissions.asciidoc @@ -26,7 +26,7 @@ To grant access to cases in a custom role, set the privileges for the **Cases** | Give full access to manage cases and settings a| * **All** for the **Cases** feature under **Security** -* **All** for the **{connectors-feature}** feature under **Management** +* **All** for the **{connectors-feature}** feature under **Stack Management** [NOTE] ==== @@ -49,7 +49,7 @@ a| **Read** for the **Security** feature and **All** for the **Cases** feature [NOTE] ==== -You can customize the sub-feature privileges to allow access to deleting cases, deleting alerts and comments from cases, and viewing or editing case settings. +You can customize the sub-feature privileges to allow access to deleting cases, deleting alerts and comments from cases, viewing or editing case settings, adding case comments and attachments, and re-opening cases. 
==== | Revoke all access to cases diff --git a/docs/serverless/investigate/cases-open-manage.asciidoc b/docs/serverless/investigate/cases-open-manage.asciidoc index 16dce41c40..a438b5f535 100644 --- a/docs/serverless/investigate/cases-open-manage.asciidoc +++ b/docs/serverless/investigate/cases-open-manage.asciidoc @@ -248,13 +248,13 @@ Use the **Export** option to move cases between different {elastic-sec} instance ==== The following attachments are _not_ exported: -* **Case files**: Case files are not exported. However, they are accessible in **Project settings** → **Management** → **Files** to download and re-add. +* **Case files**: Case files are not exported. However, they are accessible in **Project Settings** → **Stack Management** → **Files** to download and re-add. * **Alerts**: Alerts attached to cases are not exported. You must re-add them after importing cases. ==== To export a case: -. Go to **Project settings** → **Management** → **Saved objects**. +. Go to **Project Settings** → **Stack Management** → **Saved objects**. . Search for the case by choosing a saved object type or entering the case title in the search bar. . Select one or more cases, then click the **Export** button. . Click **Export**. A confirmation message that your file is downloading displays. @@ -283,6 +283,6 @@ To import a case: ==== Be mindful of the following: -* If the imported case had connectors attached to it, you'll be prompted to re-authenticate the connectors. To do so, click **Go to connectors** on the **Import saved objects** flyout and complete the necessary steps. Alternatively, open the main menu, then go to **Project settings** → **Management** → **{connectors-ui}** to access connectors. +* If the imported case had connectors attached to it, you'll be prompted to re-authenticate the connectors. To do so, click **Go to connectors** on the **Import saved objects** flyout and complete the necessary steps. 
Alternatively, open the main menu, then go to **Project Settings** → **Stack Management** → **{connectors-ui}** to access connectors. * If the imported case had attached alerts, verify that the alerts' source documents exist in the environment. Case features that interact with alerts (such as the Alert details flyout and rule details page) rely on the alerts' source documents to function. ==== diff --git a/docs/serverless/rules/detection-engine-overview.asciidoc b/docs/serverless/rules/detection-engine-overview.asciidoc index 4ec9db3dcd..605a843dc8 100644 --- a/docs/serverless/rules/detection-engine-overview.asciidoc +++ b/docs/serverless/rules/detection-engine-overview.asciidoc @@ -144,6 +144,6 @@ and you should contact your project administrator. [discrete] [[detections-logsdb-index-mode]] -== Using logsDB index mode +== Using logsdb index mode -LogsDB is enabled by default for Elastic serverless. Refer to <> to learn more. \ No newline at end of file +Logsdb is enabled by default for Elastic serverless. Refer to <> to learn more. \ No newline at end of file diff --git a/docs/serverless/rules/detections-logsdb-impact.asciidoc b/docs/serverless/rules/detections-logsdb-impact.asciidoc index f3f97a3f38..dbbceda14c 100644 --- a/docs/serverless/rules/detections-logsdb-impact.asciidoc +++ b/docs/serverless/rules/detections-logsdb-impact.asciidoc @@ -1,11 +1,11 @@ [[detections-logsdb-index-mode-impact]] -= Using logsDB index mode with {sec-serverless} += Using logsdb index mode with {sec-serverless} -LogsDB is enabled by default for {serverless-full}. This topic explains the impact of using logsDB index mode with {sec-serverless}. +Logsdb is enabled by default for {serverless-full}. This topic explains the impact of using logsdb index mode with {sec-serverless}. -With logsDB index mode, the original `_source` field is not stored in the index but can be reconstructed using {ref}/mapping-source-field.html#synthetic-source[synthetic `_source`]. 
+With logsdb index mode, the original `_source` field is not stored in the index but can be reconstructed using {ref}/mapping-source-field.html#synthetic-source[synthetic `_source`]. -When the `_source` is reconstructed, {ref}/mapping-source-field.html#synthetic-source-modifications[modifications] are possible. Therefore, there could be a mismatch between user's expectations and how fields are formatted. +When the `_source` is reconstructed, {ref}/mapping-source-field.html#synthetic-source-modifications[modifications] are possible. Therefore, there could be a mismatch between users' expectations and how fields are formatted. Continue reading to find out how this affects specific {sec-serverless} components. @@ -13,9 +13,9 @@ Continue reading to find out how this affects specific {sec-serverless} componen [[logsdb-alerts]] == Alerts -When alerts are generated, the `_source` event is copied into the alert to retain the original data. When the logsDB index mode is applied, the `_source` event stored in the alert is reconstructed using synthetic `_source`. +When alerts are generated, the `_source` event is copied into the alert to retain the original data. When the logsdb index mode is applied, the `_source` event stored in the alert is reconstructed using synthetic `_source`. 
-If you're switching to use logsDB index mode, the `_source` field stored in the alert might look different in certain situations: +If you're switching to use logsdb index mode, the `_source` field stored in the alert might look different in certain situations: * {ref}/mapping-source-field.html#synthetic-source-modifications-leaf-arrays[Arrays can be reconstructed differently or deduplicated] * {ref}/mapping-source-field.html#synthetic-source-modifications-field-names[Field names] @@ -37,7 +37,7 @@ While we do not recommend using `_source` for actions, in cases where the action If you send alert notifications by enabling {kibana-ref}/alerting-getting-started.html#alerting-concepts-actions[actions] to the external systems that have workflows or automations based on fields formatted from the original source, they may be affected. In particular, this can happen when the fields used are arrays of objects. -We recommend checking and adjusting the rule actions using `_source` before switching to logsDB index mode. +We recommend checking and adjusting the rule actions using `_source` before switching to logsdb index mode. [discrete] [[logsdb-runtime-fields]] @@ -45,7 +45,7 @@ We recommend checking and adjusting the rule actions using `_source` before swit Runtime fields that reference `_source` may be affected. Some runtime fields might not work and need to be adjusted. For example, if an event was indexed with the value of `agent.name` in the dot-notation form, it will be returned in the nested form and might not work. 
-The following is an example of accessing `_source` that works with the logsDB index mode enabled: +The following is an example of accessing `_source` that works with the logsdb index mode enabled: [source,console] ---- @@ -55,7 +55,7 @@ The following is an example of accessing `_source` that works with the logsDB in "source": """ emit($('agent.name', null) + "_____" + doc['agent.name'].value ); """ ---- -The following will not work with synthetic source (logsDB index mode enabled): +The following will not work with synthetic source (logsdb index mode enabled): [source,console] ---- diff --git a/docs/serverless/settings/advanced-settings.asciidoc b/docs/serverless/settings/advanced-settings.asciidoc index b100aa047f..1a43e02d7b 100644 --- a/docs/serverless/settings/advanced-settings.asciidoc +++ b/docs/serverless/settings/advanced-settings.asciidoc @@ -139,12 +139,6 @@ retrieved. The `securitySolution:maxUnassociatedNotes` field determines the maximum number of <> that you can attach to alerts and events. The maximum limit and default value is 1000. -[discrete] -[[security-advanced-settings-exclude-cold-and-frozen-tier-data-from-analyzer-queries]] -== Exclude cold and frozen tier data from analyzer queries - -Including data from cold and frozen {ref}/data-tiers.html[data tiers] in <> queries may result in performance degradation. The `securitySolution:excludeColdAndFrozenTiersInAnalyzer` setting allows you to exclude this data from analyzer queries. This setting is turned off by default. 
- [discrete] [[visualizations-in-flyout]] == Access the event analyzer and session view from the event or alert details flyout @@ -200,14 +194,6 @@ Adds a link to https://www.dnschecker.org[https://www.dnschecker.org] on **IP de ] ---- -[discrete] -[[enable-ccs-warning]] -== Configure cross-cluster search privilege warnings - -Each time a detection rule runs using a remote cross-cluster search (CCS) index pattern, it will return a warning saying that the rule may not have the required `read` privileges to the remote index. Because privileges cannot be checked across remote indices, this warning displays even when the rule actually does have `read` privileges to the remote index. - -If you've ensured that your detection rules have the required privileges across your remote indices, you can use the `securitySolution:enableCcsWarning` setting to disable this warning and reduce noise. - [discrete] [[show-related-integrations]] == Show/hide related integrations in Rules page tables