diff --git a/.backportrc.json b/.backportrc.json index 558a4c70f3..93386e593f 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,5 +1,5 @@ { "upstream": "elastic/security-docs", - "branches": [{ "name": "7.x", "checked": true }, "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "7.16", "7.15", "7.14", "7.13", "7.12", "7.11", "7.10", "7.9", "7.8"], + "branches": ["8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "7.16", "7.15", "7.14", "7.13", "7.12", "7.11", "7.10", "7.9", "7.8"], "labels": ["backport"] } diff --git a/docs/AI-for-security/ai-for-security.asciidoc b/docs/AI-for-security/ai-for-security.asciidoc index 0a08f8d4da..01974f06e8 100644 --- a/docs/AI-for-security/ai-for-security.asciidoc +++ b/docs/AI-for-security/ai-for-security.asciidoc @@ -8,17 +8,19 @@ You can use {elastic-sec}'s built-in AI tools to speed up your work and augment your team's capabilities. The pages in this section describe <>, which answers questions and enhances your workflows throughout {elastic-sec}, and <>, which speeds up the triage process by finding patterns and identifying attacks spanning multiple alerts. 
-include::security-assistant.asciidoc[leveloffset=+1] +include::ai-security-assistant.asciidoc[leveloffset=+1] include::attack-discovery.asciidoc[leveloffset=+1] -include::llm-connector-guides.asciidoc[leveloffset=+1] -include::azure-openai-setup.asciidoc[leveloffset=+2] +include::connector-guides-landing-pg.asciidoc[leveloffset=+1] +include::connect-to-azure-openai.asciidoc[leveloffset=+2] include::connect-to-bedrock.asciidoc[leveloffset=+2] include::connect-to-openai.asciidoc[leveloffset=+2] +include::connect-to-byo.asciidoc[leveloffset=+2] -include::ai-use-cases.asciidoc[leveloffset=+1] -include::ai-alert-triage.asciidoc[leveloffset=+2] -include::use-attack-discovery-ai-assistant-incident-reporting.asciidoc[leveloffset=+2] -include::ai-esql-queries.asciidoc[leveloffset=+2] + +include::usecase-landing-pg.asciidoc[leveloffset=+1] +include::usecase-alert-triage.asciidoc[leveloffset=+2] +include::usecase-attack-discovery-ai-assistant-incident-reporting.asciidoc[leveloffset=+2] +include::usecase-esql-queries.asciidoc[leveloffset=+2] include::llm-performance-matrix.asciidoc[leveloffset=+1] diff --git a/docs/AI-for-security/security-assistant.asciidoc b/docs/AI-for-security/ai-security-assistant.asciidoc similarity index 100% rename from docs/AI-for-security/security-assistant.asciidoc rename to docs/AI-for-security/ai-security-assistant.asciidoc diff --git a/docs/AI-for-security/attack-discovery.asciidoc b/docs/AI-for-security/attack-discovery.asciidoc index 0be333f939..e1ac8aa1ce 100644 --- a/docs/AI-for-security/attack-discovery.asciidoc +++ b/docs/AI-for-security/attack-discovery.asciidoc @@ -52,7 +52,11 @@ image::images/select-model-empty-state.png[] + . Once you've selected a connector, click **Generate** to start the analysis. -It may take from a few seconds up to several minutes to generate discoveries, depending on the number of alerts and the model you selected. 
Note that Attack discovery is in technical preview and will only analyze opened and acknowleged alerts from the past 24 hours. +It may take from a few seconds up to several minutes to generate discoveries, depending on the number of alerts and the model you selected. + +IMPORTANT: Attack discovery is in technical preview and will only analyze opened and acknowledged alerts from the past 24 hours. By default it only analyzes up to 20 alerts within this timeframe, but you can expand this up to 100 by going to **AI Assistant → Settings (image:images/icon-settings.png[Settings icon,17,17]) → Knowledge Base** and updating the **Alerts** setting. + +image::images/knowledge-base-settings.png["AI Assistant's settings menu open to the Knowledge Base tab",75%] IMPORTANT: Attack discovery uses the same data anonymization settings as <>. To configure which alert fields are sent to the LLM and which of those fields are obfuscated, use the Elastic AI Assistant settings. Consider the privacy policies of third-party LLMs before sending them sensitive data. diff --git a/docs/AI-for-security/azure-openai-setup.asciidoc b/docs/AI-for-security/connect-to-azure-openai.asciidoc similarity index 100% rename from docs/AI-for-security/azure-openai-setup.asciidoc rename to docs/AI-for-security/connect-to-azure-openai.asciidoc diff --git a/docs/AI-for-security/connect-to-byo.asciidoc b/docs/AI-for-security/connect-to-byo.asciidoc new file mode 100644 index 0000000000..6dc6a88648 --- /dev/null +++ b/docs/AI-for-security/connect-to-byo.asciidoc @@ -0,0 +1,192 @@ +[[connect-to-byo-llm]] += Connect to your own local LLM + +:frontmatter-description: Set up a connector to LM Studio so you can use a local model with AI Assistant. +:frontmatter-tags-products: [security] +:frontmatter-tags-content-type: [guide] +:frontmatter-tags-user-goals: [get-started] + +This page provides instructions for setting up a connector to a large language model (LLM) of your choice using LM Studio. 
This allows you to use your chosen model within {elastic-sec}. You'll first need to set up a reverse proxy to communicate with {elastic-sec}, then set up LM Studio on a server, and finally configure the connector in your Elastic deployment. https://www.elastic.co/blog/ai-assistant-locally-hosted-models[Learn more about the benefits of using a local LLM]. + +This example uses a single server hosted in GCP to run the following components: + +* LM Studio with the https://mistral.ai/technology/#models[Mixtral-8x7b] model +* A reverse proxy using Nginx to authenticate to Elastic Cloud + +image::images/lms-studio-arch-diagram.png[Architecture diagram for this guide] + +NOTE: For testing, you can use alternatives to Nginx such as https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/overview[Azure Dev Tunnels] or https://ngrok.com/[Ngrok], but using Nginx makes it easy to collect additional telemetry and monitor its status by using Elastic's native Nginx integration. While this example uses cloud infrastructure, it could also be replicated locally without an internet connection. + +[discrete] +== Configure your reverse proxy + +NOTE: If your Elastic instance is on the same host as LM Studio, you can skip this step. + +You need to set up a reverse proxy to enable communication between LM Studio and Elastic. For more complete instructions, refer to a guide such as https://www.digitalocean.com/community/tutorials/how-to-configure-nginx-as-a-reverse-proxy-on-ubuntu-22-04[this one]. 
+ +The following is an example Nginx configuration file: + +[source,txt] +-------------------------------------------------- +server { + listen 80; + listen [::]:80; + server_name ; + server_tokens off; + add_header x-xss-protection "1; mode=block" always; + add_header x-frame-options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + return 301 https://$server_name$request_uri; +} + +server { + + listen 443 ssl http2; + listen [::]:443 ssl http2; + server_name ; + server_tokens off; + ssl_certificate /etc/letsencrypt/live//fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live//privkey.pem; + ssl_session_timeout 1d; + ssl_session_cache shared:SSL:50m; + ssl_session_tickets on; + ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256'; + ssl_protocols TLSv1.3 TLSv1.2; + ssl_prefer_server_ciphers on; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; + add_header x-xss-protection "1; mode=block" always; + add_header x-frame-options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header Referrer-Policy "strict-origin-when-cross-origin" always; + ssl_stapling on; + ssl_stapling_verify on; + ssl_trusted_certificate /etc/letsencrypt/live//fullchain.pem; + resolver 1.1.1.1; + location / { + + if ($http_authorization != "Bearer ") { + return 401; +} + + proxy_pass http://localhost:1234/; + } + +} +-------------------------------------------------- + +IMPORTANT: If using the example configuration file above, you must replace several values: Replace `` with your actual token, and keep it safe since you'll need it to set up the {elastic-sec} connector. Replace `` with your actual domain name. Update the `proxy_pass` value at the bottom of the configuration if you decide to change the port number in LM Studio to something other than 1234. 
+ +[discrete] +=== (Optional) Set up performance monitoring for your reverse proxy +You can use Elastic's {integrations-docs}/nginx[Nginx integration] to monitor performance and populate monitoring dashboards in the {security-app}. + +[discrete] +== Configure LM Studio and download a model + +First, install https://lmstudio.ai/[LM Studio]. LM Studio supports the OpenAI SDK, which makes it compatible with Elastic's OpenAI connector, allowing you to connect to any model available in the LM Studio marketplace. + +One current limitation of LM Studio is that when it is installed on a server, you must launch the application using its GUI before doing so using the CLI. For example, by using Chrome RDP with an https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine[X Window System]. After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. + +Once you've launched LM Studio: + +1. Go to LM Studio's Search window. +2. Search for an LLM (for example, `Mixtral-8x7B-instruct`). Your chosen model must include `instruct` in its name in order to work with Elastic. +3. Filter your search for "Compatibility Guess" to optimize results for your hardware. Results will be color coded: + * Green means "Full GPU offload possible", which yields the best results. + * Blue means "Partial GPU offload possible", which may work. + * Red for "Likely too large for this machine", which typically will not work. +4. Download one or more models. + +IMPORTANT: For security reasons, before downloading a model, verify that it is from a trusted source. It can be helpful to review community feedback on the model (for example using a site like Hugging Face). + +image::images/lms-model-select.png[The LM Studio model selection interface] + +In this example we used https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF[`TheBloke/Mixtral-8x7B-Instruct-v0.1.Q3_K_M.gguf`]. 
It has 46.7B total parameters, a 32,000 token context window, and uses GGUF https://huggingface.co/docs/transformers/main/en/quantization/overview[quantization]. For more information about model names and format information, refer to the following table. + +[cols="1,1,1,1", options="header"] +|=== +| Model Name | Parameter Size | Tokens/Context Window | Quantization Format +| Name of model, sometimes with a version number. +| LLMs are often compared by their number of parameters — higher numbers mean more powerful models. +| Tokens are small chunks of input information. Tokens do not necessarily correspond to characters. You can use https://platform.openai.com/tokenizer[Tokenizer] to see how many tokens a given prompt might contain. +| Quantization reduces overall parameters and helps the model to run faster, but reduces accuracy. +| Examples: Llama, Mistral, Phi-3, Falcon. +| The number of parameters is a measure of the size and the complexity of the model. The more parameters a model has, the more data it can process, learn from, generate, and predict. +| The context window defines how much information the model can process at once. If the number of input tokens exceeds this limit, input gets truncated. +| Specific formats for quantization vary; most models now support GPU rather than CPU offloading. +|=== + +[discrete] +== Load a model in LM Studio + +After downloading a model, load it in LM Studio using the GUI or LM Studio's https://lmstudio.ai/blog/lms[CLI tool]. + +[discrete] +=== Option 1: load a model using the CLI (Recommended) + +It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI only allows you to import specific paths, but the CLI provides a good interface for loading and unloading. + +Use the following commands in your CLI: + +1. Verify LM Studio is installed: `lms` +2. Check LM Studio's status: `lms status` +3. 
List all downloaded models: `lms ls` +4. Load a model: `lms load` + +image::images/lms-cli-welcome.png[The CLI interface during execution of initial LM Studio commands] + +After the model loads, you should see a `Model loaded successfully` message in the CLI. + +image::images/lms-studio-model-loaded-msg.png[The CLI message that appears after a model loads] + +To verify which model is loaded, use the `lms ps` command. + +image::images/lms-ps-command.png[The CLI message that appears after running lms ps] + +If your model uses NVIDIA drivers, you can check the GPU performance with the `sudo nvidia-smi` command. + +[discrete] +=== Option 2: load a model using the GUI + +Refer to the following video to see how to load a model using LM Studio's GUI. You can change the **port** setting, which is referenced in the Nginx configuration file. Note that the **GPU offload** was set to **Max**. + +======= +++++ + + +
+++++ +======= + +[discrete] +== (Optional) Collect logs using Elastic's Custom Logs integration + +You can monitor the performance of the host running LM Studio using Elastic's {integrations-docs}/log[Custom Logs integration]. This can also help with troubleshooting. Note that the default path for LM Studio logs is `/tmp/lmstudio-server-log.txt`, as in the following screenshot: + +image::images/lms-custom-logs-config.png[The configuration window for the custom logs integration] + +[discrete] +== Configure the connector in your Elastic deployment + +Finally, configure the connector: + +1. Log in to your Elastic deployment. +2. Navigate to **Stack Management → Connectors → Create Connector → OpenAI**. The OpenAI connector enables this use case because LM Studio uses the OpenAI SDK. +3. Name your connector to help keep track of the model version you are using. +4. Under **URL**, enter the domain name specified in your Nginx configuration file, followed by `/v1/chat/completions`. +5. Under **Default model**, enter `local-model`. +6. Under **API key**, enter the secret token specified in your Nginx configuration file. +7. Click **Save**. + +image::images/lms-edit-connector.png[The Edit connector page in the {security-app}, with appropriate values populated] + +Setup is now complete. You can use the model you've loaded in LM Studio to power Elastic's generative AI features. You can test a variety of models as you interact with AI Assistant to see what works best without having to update your connector. + +NOTE: While local models work well for <>, we recommend you use one of <> for interacting with <>. As local models become more performant over time, this is likely to change. 
diff --git a/docs/AI-for-security/llm-connector-guides.asciidoc b/docs/AI-for-security/connector-guides-landing-pg.asciidoc similarity index 89% rename from docs/AI-for-security/llm-connector-guides.asciidoc rename to docs/AI-for-security/connector-guides-landing-pg.asciidoc index ead15956b1..9e0c68205d 100644 --- a/docs/AI-for-security/llm-connector-guides.asciidoc +++ b/docs/AI-for-security/connector-guides-landing-pg.asciidoc @@ -8,4 +8,4 @@ Setup guides are available for the following LLM providers: * <> * <> * <> - +* <> diff --git a/docs/AI-for-security/images/lms-cli-welcome.png b/docs/AI-for-security/images/lms-cli-welcome.png new file mode 100644 index 0000000000..c857d01454 Binary files /dev/null and b/docs/AI-for-security/images/lms-cli-welcome.png differ diff --git a/docs/AI-for-security/images/lms-custom-logs-config.png b/docs/AI-for-security/images/lms-custom-logs-config.png new file mode 100644 index 0000000000..35e82e89cd Binary files /dev/null and b/docs/AI-for-security/images/lms-custom-logs-config.png differ diff --git a/docs/AI-for-security/images/lms-edit-connector.png b/docs/AI-for-security/images/lms-edit-connector.png new file mode 100644 index 0000000000..0359253eb1 Binary files /dev/null and b/docs/AI-for-security/images/lms-edit-connector.png differ diff --git a/docs/AI-for-security/images/lms-model-select.png b/docs/AI-for-security/images/lms-model-select.png new file mode 100644 index 0000000000..454fa2a1ab Binary files /dev/null and b/docs/AI-for-security/images/lms-model-select.png differ diff --git a/docs/AI-for-security/images/lms-ps-command.png b/docs/AI-for-security/images/lms-ps-command.png new file mode 100644 index 0000000000..af72b6976c Binary files /dev/null and b/docs/AI-for-security/images/lms-ps-command.png differ diff --git a/docs/AI-for-security/images/lms-studio-arch-diagram.png b/docs/AI-for-security/images/lms-studio-arch-diagram.png new file mode 100644 index 0000000000..4b737bbb7c Binary files /dev/null and 
b/docs/AI-for-security/images/lms-studio-arch-diagram.png differ diff --git a/docs/AI-for-security/images/lms-studio-model-loaded-msg.png b/docs/AI-for-security/images/lms-studio-model-loaded-msg.png new file mode 100644 index 0000000000..c2e3ec8114 Binary files /dev/null and b/docs/AI-for-security/images/lms-studio-model-loaded-msg.png differ diff --git a/docs/AI-for-security/ai-alert-triage.asciidoc b/docs/AI-for-security/usecase-alert-triage.asciidoc similarity index 100% rename from docs/AI-for-security/ai-alert-triage.asciidoc rename to docs/AI-for-security/usecase-alert-triage.asciidoc diff --git a/docs/AI-for-security/use-attack-discovery-ai-assistant-incident-reporting.asciidoc b/docs/AI-for-security/usecase-attack-discovery-ai-assistant-incident-reporting.asciidoc similarity index 100% rename from docs/AI-for-security/use-attack-discovery-ai-assistant-incident-reporting.asciidoc rename to docs/AI-for-security/usecase-attack-discovery-ai-assistant-incident-reporting.asciidoc diff --git a/docs/AI-for-security/ai-esql-queries.asciidoc b/docs/AI-for-security/usecase-esql-queries.asciidoc similarity index 100% rename from docs/AI-for-security/ai-esql-queries.asciidoc rename to docs/AI-for-security/usecase-esql-queries.asciidoc diff --git a/docs/AI-for-security/ai-use-cases.asciidoc b/docs/AI-for-security/usecase-landing-pg.asciidoc similarity index 100% rename from docs/AI-for-security/ai-use-cases.asciidoc rename to docs/AI-for-security/usecase-landing-pg.asciidoc diff --git a/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc b/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc index 6eb5042269..932e6b07fb 100644 --- a/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc +++ b/docs/advanced-entity-analytics/entity-risk-scoring.asciidoc @@ -1,6 +1,11 @@ [[entity-risk-scoring]] = Entity risk scoring +[sidebar] +-- +If you’ve installed the original user and host risk score modules, refer to 
{security-guide-all}/8.11/host-risk-score.html[Host risk score] and {security-guide-all}/8.11/user-risk-score.html[User risk score]. +-- + beta::[] Entity risk scoring is an advanced {elastic-sec} analytics feature that helps security analysts detect changes in an entity's risk posture, hunt for new threats, and prioritize incident response. diff --git a/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc b/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc index 7cbd3c1e40..5391c71fad 100644 --- a/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc +++ b/docs/advanced-entity-analytics/turn-on-risk-engine.asciidoc @@ -23,7 +23,7 @@ image::images/preview-risky-entities.png[Preview of risky entities] [NOTE] ====== * To view risk score data, you must have alerts generated in your environment. -* If you previously installed the original <> and <> modules, and you're upgrading to {stack} version 8.11 or newer, refer to <>. +* If you previously installed the original user and host risk score modules, and you're upgrading to {stack} version 8.11 or newer, refer to <>. ====== If you're installing the risk scoring engine for the first time: diff --git a/docs/cloud-native-security/d4c-get-started.asciidoc b/docs/cloud-native-security/d4c-get-started.asciidoc index 2722f20588..585959d8f3 100644 --- a/docs/cloud-native-security/d4c-get-started.asciidoc +++ b/docs/cloud-native-security/d4c-get-started.asciidoc @@ -6,6 +6,8 @@ :frontmatter-tags-content-type: [how-to] :frontmatter-tags-user-goals: [get-started] +beta::[] + This page describes how to set up Cloud Workload Protection (CWP) for Kubernetes. 
.Requirements diff --git a/docs/cloud-native-security/d4c-overview.asciidoc b/docs/cloud-native-security/d4c-overview.asciidoc index 9087458f34..13e4d75df2 100644 --- a/docs/cloud-native-security/d4c-overview.asciidoc +++ b/docs/cloud-native-security/d4c-overview.asciidoc @@ -1,6 +1,8 @@ [[d4c-overview]] = Cloud workload protection for Kubernetes +beta::[] + Elastic Cloud Workload Protection (CWP) for Kubernetes provides cloud-native runtime protections for containerized environments by identifying and optionally blocking unexpected system behavior in Kubernetes containers. [[d4c-use-cases]] diff --git a/docs/cloud-native-security/vuln-management-faq.asciidoc b/docs/cloud-native-security/vuln-management-faq.asciidoc index 9940cb0d11..f86902fd8b 100644 --- a/docs/cloud-native-security/vuln-management-faq.asciidoc +++ b/docs/cloud-native-security/vuln-management-faq.asciidoc @@ -11,6 +11,10 @@ The CNVM integration uses various security data sources. The complete list can b CNVM uses the open source scanner https://github.com/aquasecurity/trivy[Trivy] v0.35. +*What system architectures are supported?* + +Because of Trivy's limitations, CNVM can only be deployed on ARM-based VMs. However, it can scan hosts regardless of system architecture. + *How often are the security data sources synchronized?* The CNVM integration fetches the latest data sources at the beginning of every scan cycle to ensure up-to-date vulnerability information. diff --git a/docs/cloud-native-security/vuln-management-get-started.asciidoc b/docs/cloud-native-security/vuln-management-get-started.asciidoc index 6736d9a4bc..7369634175 100644 --- a/docs/cloud-native-security/vuln-management-get-started.asciidoc +++ b/docs/cloud-native-security/vuln-management-get-started.asciidoc @@ -8,7 +8,8 @@ This page explains how to set up Cloud Native Vulnerability Management (CNVM). -- * CNVM is available to all {ecloud} users. 
On-premise deployments require an https://www.elastic.co/pricing[Enterprise subscription]. * Requires {stack} and {agent} version 8.8 or higher. -* CNVM only works in the `Default` {kib} space. Installing the CNVM integration on a different {kib} space will not work. +* Only works in the `Default` {kib} space. Installing the CNVM integration on a different {kib} space will not work. +* CNVM can only be deployed on ARM-based VMs. * To view vulnerability scan findings, you need at least `read` privileges for the following indices: ** `logs-cloud_security_posture.vulnerabilities-*` ** `logs-cloud_security_posture.vulnerabilities_latest-*` diff --git a/docs/detections/alerts-view-details.asciidoc b/docs/detections/alerts-view-details.asciidoc index 5910de7e6f..f66fb14184 100644 --- a/docs/detections/alerts-view-details.asciidoc +++ b/docs/detections/alerts-view-details.asciidoc @@ -144,8 +144,6 @@ image::images/insights-section-rp.png[Insights section of the Overview tab, 65%] The Entities overview provides high-level details about the user and host that are related to the alert. Host and user risk classifications are also available with a https://www.elastic.co/pricing[Platinum subscription] or higher. -NOTE: <> and <> risk scores are technical preview features. - [role="screenshot"] image::images/entities-overview.png[Overview of the entity details section in the right panel, 60%] diff --git a/docs/experimental-features/beaconing-detection.asciidoc b/docs/experimental-features/beaconing-detection.asciidoc deleted file mode 100644 index 6b2301e5e5..0000000000 --- a/docs/experimental-features/beaconing-detection.asciidoc +++ /dev/null @@ -1,54 +0,0 @@ -[[network-beaconing-framework]] -== Network Beaconing - -This feature provides an early warning system for command and control beaconing activity. It monitors network traffic for indicators of compromise and provides analytics to add context to alerts and aid your threat hunting. 
- -[discrete] -=== Deploy the package - -To deploy the network beaconing framework in your environment, follow {integrations-docs}/beaconing#installation[these steps]. - -The installation package includes dashboards for monitoring beaconing activity in your environment. You can review signals using a Lens dashboard called Network beaconing. - -NOTE: If you want to modify any of the package components, you can install the package manually by following https://github.com/elastic/detection-rules/blob/main/docs/experimental-machine-learning/beaconing.md[these steps]. - -[role="screenshot"] -image::images/beaconing-detection-1.png[] - -[discrete] -=== Feature details - -This feature uses a {ref}/transforms.html[transform] to categorize network data by host and process name, then runs scripted metric aggregations on the host-process name pairs. For a given time window, the scripted metric aggregation checks each pair for the following: - -* Signals repeating at regular intervals, accounting for minor variations in those intervals. -* Low variation of bytes sent from source to destination. -* Low variation of bytes sent from destination to source. - -The transform, which runs every hour, also filters out common, known applications and IPs to reduce false positives. The transform outputs information about the detection, process, and host indicators, for example: - -[role="screenshot"] -image::images/beaconing-detection-2.png[] -The values highlighted above are typical of beaconing behavior and can help with your investigation. - -[discrete] -=== Further customizations - -Advanced users can also tune the scripted metric aggregation's parameters, such as jitter percentage or time window. To overwrite the default parameters: delete the transform, change the parameters, and restart the transform. The configurable parameters are: - -* `number_buckets_in_range`: The number of time buckets into which the time window is split. 
Using more buckets improves estimates for various statistics, but also increases resource usage. -* `time_bucket_length`: The length of each time bucket. A higher value indicates a longer time window. Set this to a higher value to check for very low-frequency beacons. -* `number_destination_ips`: The number of destination IPs to collect in results. Setting this to a higher value increases resource usage. -* `max_beaconing_bytes_cov`: The maximum coefficient of variation in the payload bytes for the low source and destination bytes variance test. Higher values increase the chance of flagging traffic as beaconing, increasing https://en.wikipedia.org/wiki/Precision_and_recall[recall] while reducing https://en.wikipedia.org/wiki/Precision_and_recall[precision]. -* `max_beaconing_count_rv`: The maximum relative variance in the bucket counts for the high-frequency beacon test. As with `max_beaconing_bytes_cov`, tuning this parameter involves a tradeoff between recall and precision. -* `truncate_at`: The lower and upper fraction of bucket values discarded when computing `max_beaconing_bytes_cov` and `max_beaconing_count_rv`. This allows you to ignore occasional changes in traffic patterns. However, if you retain too small a fraction of the data, these tests will be unreliable. -* `min_beaconing_count_autocovariance`: The minimum autocorrelation of the signal for the low-frequency beacon test. Lowering this value generally increases recall for malicious command and control beacons, while reducing precision. -* `max_jitter`: The maximum amount of https://en.wikipedia.org/wiki/Jitter[jitter] assumed to be possible for a periodic beacon, as a fraction of its period. - -You can also make changes to the transform query. The default query looks for beaconing activity over a 6-hour time range, but you can change it. - -Beaconing is not used exclusively by malware. Many legitimate, benign processes also exhibit beacon-like activity. 
To reduce false positives, default filters in the transform query exclude known beaconing processes and IPs that fall into two groups: - -* The source IP is local and the destination is remote. -* The destination IP is in a block of known Microsoft IP addresses. - -You can create additional filters to meet the needs of your environment. diff --git a/docs/experimental-features/experimental-features-intro.asciidoc b/docs/experimental-features/experimental-features-intro.asciidoc deleted file mode 100644 index c4612ae097..0000000000 --- a/docs/experimental-features/experimental-features-intro.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[[sec-experimental-intro]] -= Technical preview - -The features in this section are experimental and may be changed or removed completely in future releases. Elastic will make a best effort to fix any issues, but experimental features are not supported to the same level as generally available (GA) features. - - -include::host-risk-score.asciidoc[] -include::user-risk-score.asciidoc[] - diff --git a/docs/experimental-features/host-risk-score.asciidoc b/docs/experimental-features/host-risk-score.asciidoc deleted file mode 100644 index 73f1f30c2b..0000000000 --- a/docs/experimental-features/host-risk-score.asciidoc +++ /dev/null @@ -1,327 +0,0 @@ -[[host-risk-score]] -== Host risk score - -NOTE: This page refers to the original user and host risk score modules. If you have the original modules installed, and you're running {stack} version 8.11 or newer, you can <>. -For information about the latest risk engine, refer to <>. - -NOTE: This feature is available for {stack} versions 7.16.0 and newer and requires a https://www.elastic.co/pricing[Platinum subscription] or higher. - -The host risk score feature highlights risky hosts from within your environment. It utilizes a transform with a scripted metric aggregation to calculate host risk scores based on alerts that were generated within the past five days. 
The transform runs hourly to update the score as new alerts are generated. - -Each rule's contribution to the host risk score is based on the rule's risk score (`signal.rule.risk_score`) and a time decay factor to reduce the impact of stale alerts. The risk score is calculated using a weighted sum where rules with higher time-corrected risk scores also have higher weights. Each host risk score is normalized on a scale of 0 to 100. - -Specific host attributes can boost the final risk score. For example, alert activity on a server poses a greater risk than that on a laptop. Therefore, the host risk score is 1.5 times higher if the host is a server. This boosted score is finalized after calculating the weighted sum of the time-corrected risks. - -The following table shows how risk levels are applied to a host, based on the normalized risk score: - -[width="100%",options="header"] -|============================================== -|Risk level |Host risk score - -|Unknown |< 20 -|Low |20-40 -|Moderate |40-70 -|High | 70-90 -|Critical | > 90 - - -|============================================== - -[[enable-host-risk-score]] -[discrete] -=== Enable host risk score - -NOTE: To enable the host risk score feature, you must have alerts in your environment. If you previously enabled host risk score and are upgrading the {stack} to 8.5–8.10, refer to <>. - -You can enable host risk score from the following places in the {security-app}: - -* The Entity Analytics dashboard -* The *Host risk* tab on the Hosts page -* The *Host risk* tab on a host's details page - -Or, in {kib}, you can enable host risk score in Console. - -To enable host risk score from the Entity Analytics dashboard: - -. In the {security-app}, go to *Dashboards* -> *Entity Analytics*. -. In the Host Risk Scores section, click *Enable* to install the module. - -To enable host risk score from the Hosts page: - -. Go to *Explore* -> *Hosts*. -. Select the *Host risk* tab, then click *Enable* to install the module. 
- -[role="screenshot"] -image::images/enable-hrs.png[Enable Host Risk Score button] - -To enable host risk score from a host's details page: - -. Go to *Explore* -> *Hosts*. -. Select the *All hosts* tab, then click a host name. -. On the details page, scroll down to the data tables, then select the *Host risk* tab. -. Click *Enable* to install the module. - -To enable host risk score from Console in {kib}, open a browser window and enter the following URL: - -[source,console] ----------------------------------- -{KibanaURL}/s/{spaceID}/app/dev_tools#/console?load_from={KibanaURL}/s/{spaceID}/internal/risk_score/prebuilt_content/dev_tool/enable_host_risk_score ----------------------------------- - -NOTE: If there's existing content in Console, scroll to the bottom to find the output loaded. - -TIP: If you receive an error message during the installation process, delete the host risk score module manually, then re-enable it. Refer to <> for more information. - -[[upgrade-host-risk-score]] -[discrete] -=== Upgrade host risk score - -If you previously enabled host risk score and you're upgrading to {stack} version 8.11 or newer, you can <>. - -Before upgrading, note the following: - -* Since older data is not preserved, previous host risk scores will be deleted, and new scores will be created. However, if you want to retain old host risk scores, you can reindex them _before_ upgrading. To learn how, refer to {ref}/docs-reindex.html[Reindex API]. New data will be stored in the `ml_host_risk_score_` and `ml_host_risk_score_latest_` indices. - -* You must edit your {cloud}/ec-manage-kibana-settings.html#ec-manage-kibana-settings[{kib} user settings] and remove the `xpack.securitySolution.enableExperimental:['riskyHostsEnabled']` feature flag. 
- -After this is done, you can proceed with upgrading the host risk score feature from any of the following places in the {security-app}: - -* The Entity Analytics dashboard -* The *Host risk* tab on the Hosts page -* The *Host risk* tab on a host's details page - -NOTE: After you enable or upgrade host risk score, you might get a message that says, "No host risk score data available to display." To verify that the transform that installs the host risk score module is picking up data, refer to <>. - -TIP: If you receive an error message during the upgrade process, delete the host risk score module manually, and then re-enable it. Refer to <> for more information. - -[[analyze-host-risk-score]] -[discrete] -=== Analyze host risk score data - -It is recommended you analyze hosts with the highest risk scores first -- those in the `Critical` and `Moderate` categories. Host risk score data appears in the following places in the {security-app}: - -The `host.risk.calculated_level` column in the Alerts table: - -[role="screenshot"] -image::images/hrs-alerts-table.png[Host risk score in the Alerts table] - -The *Insights* -> *Entities* section on the *Overview* tab within the alert details flyout: - -[role="screenshot"] -image::images/score-in-flyout.png[Host risk score in alert details flyout,65%] - -The *Host risk classification* column in the All hosts table on the Hosts page: - -[role="screenshot"] -image::images/hrs-all-hosts.png[Host risk score on the Hosts page] - -The *Host risk* tab on the Hosts page: - -[role="screenshot"] -image::advanced-entity-analytics/images/hosts-hr-data.png[Host risk score on the Hosts page] - -The Overview section on the host details page: - -[role="screenshot"] -image::images/hrs-overview-section.png[Host risk score in Overview section] - -The *Host risk* tab on the host details page: - -[role="screenshot"] -image::images/hosts-by-risk-details-page.png[Host risk score on the Hosts risk tab] - -You can also visualize host risk score data 
using prebuilt dashboards that are automatically imported when the feature is enabled. - -To access the dashboards: - -. In {kib}, go to *Analytics* -> *Dashboard*, then search for `risk score`. -. Select *Drilldown of Host Risk Score* to analyze the risk components of a host, or *Current Risk Score for Hosts* to display a list of current risky hosts in your environment. - -[role="screenshot"] -image::images/select-hrs-dashboard.png[Select host risk score dashboard] - -In this example, we'll explore the *Drilldown of Host Risk Score* dashboard. - -[role="screenshot"] -image::images/full-dashboard.png[Shows dashboard] - -Use the histogram to track how the risk score for a particular host has changed over time. To specify a date range, use the date and time picker, or drag and select a time range within the histogram. - -[role="screenshot"] -image::images/histogram.png[] - -To go to the host's details page, click any host's corresponding bar in the histogram, then select *Go to Host View*. - -[role="screenshot"] -image::images/go-to-host-view.png[] - -The histogram shows historical changes in a particular host's risk score(s). To specify a date range, use the date and time picker, or drag and select a time range within the histogram. - -[role="screenshot"] -image::images/data-tables.png[] - -[[troubleshoot-host-risk-score]] -[discrete] -=== Troubleshooting - -During the installation or upgrade process, you may receive the following error messages: - -* `Saved object already exists` -* `Transform already exists` -* `Ingest pipeline already exists` - -In this case, we recommend that you manually delete the host risk score module, then re-enable it. To manually delete the module: - -. Delete the host risk score saved objects: -.. From the {kib} main menu, go to **Stack Management** -> **Kibana** -> **Saved Objects**. -.. Delete the saved objects that have the `Host Risk Score - ` tag. 
-+ -[role="screenshot"] -image::images/delete-hrs-saved-objects.png[Delete host risk score saved objects] -.. Delete the `Host Risk Score - ` tag. -+ -[role="screenshot"] -image::images/delete-hrs-tag.png[Delete host risk score tag] -. Stop and delete the host risk score transforms. You can do this using the {kib} UI or the {ref}/stop-transform.html[Stop transform API] and {ref}/delete-transform.html[Delete transform API]. -** To delete the host risk score transforms using the {kib} UI: -.. From the {kib} main menu, go to **Stack Management** -> **Data** -> **Transforms**. -.. Stop the following transforms, then delete them: -*** `ml_hostriskscore_latest_transform_` -*** `ml_hostriskscore_pivot_transform_` -** To delete the host risk score transforms using the API, run the following commands in Console: -.. Stop and delete the latest transform: -+ -[source,console] ----------------------------------- -POST _transform/ml_hostriskscore_latest_transform_/_stop -DELETE _transform/ml_hostriskscore_latest_transform_ ----------------------------------- -.. Stop and delete the pivot transform: -+ -[source,console] ----------------------------------- -POST _transform/ml_hostriskscore_pivot_transform_/_stop -DELETE _transform/ml_hostriskscore_pivot_transform_ ----------------------------------- -. Delete the host risk score ingest pipeline. You can do this using the {kib} UI or the {ref}/delete-pipeline-api.html[Delete pipeline API]. -** To delete the host risk score ingest pipeline using the {kib} UI: -.. From the {kib} main menu, go to **Stack Management** -> **Ingest** -> **Ingest Pipelines**. -.. Delete the `ml_hostriskscore_ingest_pipeline_` ingest pipeline. -** To delete the host risk score ingest pipeline using the Delete pipeline API, run the following command in Console: -+ -[source,console] ----------------------------------- -DELETE /_ingest/pipeline/ml_hostriskscore_ingest_pipeline_ ----------------------------------- -. 
Delete the stored host risk score scripts using the {ref}/delete-stored-script-api.html[Delete stored script API]. In Console, run the following commands: -+ -[source,console] ----------------------------------- -DELETE _scripts/ml_hostriskscore_levels_script_ -DELETE _scripts/ml_hostriskscore_init_script_ -DELETE _scripts/ml_hostriskscore_map_script_ -DELETE _scripts/ml_hostriskscore_reduce_script_ ----------------------------------- - -After manually deleting the host risk score saved objects, transforms, ingest pipeline, and stored scripts, follow the steps to <>. - -[[verify-host-risk-score]] -=== Verify that host risk score data installed successfully (Optional) - -After you enable or upgrade host risk score, the following message may appear: - -[role="screenshot"] -image::images/restart-hrs.png[Restart host risk score] - -If so, click *Restart* and allow at least an hour for the data to be generated. If data still doesn't appear, verify that host risk score data has been generated: - -In {kib}, run the following commands in Console to query the `ml_host_risk_score_` index: - -[source,console] ----------------------------------- -GET ml_host_risk_score_/_search ----------------------------------- - -If no data returns, you'll need to check if the alerts index (.`alerts-security.alerts-`) had alert data when `ml_hostriskscore_pivot_transform_` was started. 
- -Example: - -[source,console] ----------------------------------- -GET transform/ml_hostriskscore_pivot_transform_/_stats?human=true ----------------------------------- - -Here's an example response: - -[source,console] ----------------------------------- -{ - "count": 1, - "transforms": [ - { - "id": "ml_hostriskscore_pivot_transform_", - "state": "started", - "node": { - "id": "H1tlwfTyRkWls-C0sarmHw", - "name": "instance-0000000000", - "ephemeral_id": "SBqlp5ywRuuop2gtcdCljA", - "transport_address": "10.43.255.164:19635", - "attributes": {} - }, - "stats": { - "pages_processed": 29, - "documents_processed": 11805, - "documents_indexed": 8, - "documents_deleted": 0, - "trigger_count": 9, - "index_time_in_ms": 52, - "index_total": 7, - "index_failures": 0, - "search_time_in_ms": 201, - "search_total": 29, - "search_failures": 0, - "processing_time_in_ms": 14, - "processing_total": 29, - "delete_time_in_ms": 0, - "exponential_avg_checkpoint_duration_ms": 59.02353261024906, - "exponential_avg_documents_indexed": 0.8762710605864747, - "exponential_avg_documents_processed": 1664.7724779548555 - }, - "checkpointing": { - "last": { - "checkpoint": 8, - "timestamp": "2022-10-17T14:49:50.315Z", - "timestamp_millis": 1666018190315, - "time_upper_bound": "2022-10-17T14:47:50.315Z", - "time_upper_bound_millis": 1666018070315 - }, - "operations_behind": 380, - "changes_last_detected_at_string": "2022-10-17T14:49:50.113Z", - "changes_last_detected_at": 1666018190113, - "last_search_time_string": "2022-10-17T14:49:50.113Z", - "last_search_time": 1666018190113 - } - } - ] -} ----------------------------------- - -Take note of the value from `time_upper_bound_millis` and enter it as a range query for the alerts index. 
- -Example: - -[source,console] ----------------------------------- -GET .alerts-security.alerts-/_search -{ - "query": { - "range": { - "@timestamp": { - "lt": 1666018070315 - } - } - } -} ----------------------------------- - -If there's no response, verify that relevant <> are running and that alert data is being generated. If there is a response, click *Restart* and allow an hour for the host risk data to appear. \ No newline at end of file diff --git a/docs/experimental-features/images/beaconing-detection-1.png b/docs/experimental-features/images/beaconing-detection-1.png deleted file mode 100644 index a7cc7663f1..0000000000 Binary files a/docs/experimental-features/images/beaconing-detection-1.png and /dev/null differ diff --git a/docs/experimental-features/images/beaconing-detection-2.png b/docs/experimental-features/images/beaconing-detection-2.png deleted file mode 100644 index 1c92671c94..0000000000 Binary files a/docs/experimental-features/images/beaconing-detection-2.png and /dev/null differ diff --git a/docs/experimental-features/images/dashboard.gif b/docs/experimental-features/images/dashboard.gif deleted file mode 100644 index b0f2ca830b..0000000000 Binary files a/docs/experimental-features/images/dashboard.gif and /dev/null differ diff --git a/docs/experimental-features/images/data-tables.png b/docs/experimental-features/images/data-tables.png deleted file mode 100644 index 89acfcc040..0000000000 Binary files a/docs/experimental-features/images/data-tables.png and /dev/null differ diff --git a/docs/experimental-features/images/delete-hrs-saved-objects.png b/docs/experimental-features/images/delete-hrs-saved-objects.png deleted file mode 100644 index c4c05024ad..0000000000 Binary files a/docs/experimental-features/images/delete-hrs-saved-objects.png and /dev/null differ diff --git a/docs/experimental-features/images/delete-hrs-tag.png b/docs/experimental-features/images/delete-hrs-tag.png deleted file mode 100644 index f35ad916d7..0000000000 Binary 
files a/docs/experimental-features/images/delete-hrs-tag.png and /dev/null differ diff --git a/docs/experimental-features/images/delete-urs-saved-objects.png b/docs/experimental-features/images/delete-urs-saved-objects.png deleted file mode 100644 index 4e41bb8590..0000000000 Binary files a/docs/experimental-features/images/delete-urs-saved-objects.png and /dev/null differ diff --git a/docs/experimental-features/images/delete-urs-tag.png b/docs/experimental-features/images/delete-urs-tag.png deleted file mode 100644 index 030e1e357b..0000000000 Binary files a/docs/experimental-features/images/delete-urs-tag.png and /dev/null differ diff --git a/docs/experimental-features/images/enable-hrs-details-pg.gif b/docs/experimental-features/images/enable-hrs-details-pg.gif deleted file mode 100644 index 14d7898159..0000000000 Binary files a/docs/experimental-features/images/enable-hrs-details-pg.gif and /dev/null differ diff --git a/docs/experimental-features/images/enable-hrs.png b/docs/experimental-features/images/enable-hrs.png deleted file mode 100644 index c77dfb7ce3..0000000000 Binary files a/docs/experimental-features/images/enable-hrs.png and /dev/null differ diff --git a/docs/experimental-features/images/enable-urs.png b/docs/experimental-features/images/enable-urs.png deleted file mode 100644 index e7ffde47ca..0000000000 Binary files a/docs/experimental-features/images/enable-urs.png and /dev/null differ diff --git a/docs/experimental-features/images/feature-flag.png b/docs/experimental-features/images/feature-flag.png deleted file mode 100644 index 55abffa37c..0000000000 Binary files a/docs/experimental-features/images/feature-flag.png and /dev/null differ diff --git a/docs/experimental-features/images/full-dashboard.png b/docs/experimental-features/images/full-dashboard.png deleted file mode 100644 index 073ffec098..0000000000 Binary files a/docs/experimental-features/images/full-dashboard.png and /dev/null differ diff --git 
a/docs/experimental-features/images/go-to-host-view.png b/docs/experimental-features/images/go-to-host-view.png deleted file mode 100644 index ce81f59580..0000000000 Binary files a/docs/experimental-features/images/go-to-host-view.png and /dev/null differ diff --git a/docs/experimental-features/images/go-to-host.png b/docs/experimental-features/images/go-to-host.png deleted file mode 100644 index e9fead98ee..0000000000 Binary files a/docs/experimental-features/images/go-to-host.png and /dev/null differ diff --git a/docs/experimental-features/images/histogram.png b/docs/experimental-features/images/histogram.png deleted file mode 100644 index 77b9fe6c13..0000000000 Binary files a/docs/experimental-features/images/histogram.png and /dev/null differ diff --git a/docs/experimental-features/images/host-risk-score-dev-tools-console.png b/docs/experimental-features/images/host-risk-score-dev-tools-console.png deleted file mode 100644 index 2e787d6d99..0000000000 Binary files a/docs/experimental-features/images/host-risk-score-dev-tools-console.png and /dev/null differ diff --git a/docs/experimental-features/images/host-risk-score-enable-dev-tools.png b/docs/experimental-features/images/host-risk-score-enable-dev-tools.png deleted file mode 100644 index 7fcea43afa..0000000000 Binary files a/docs/experimental-features/images/host-risk-score-enable-dev-tools.png and /dev/null differ diff --git a/docs/experimental-features/images/host-risk-score-import-dashboard.png b/docs/experimental-features/images/host-risk-score-import-dashboard.png deleted file mode 100644 index e460d4c951..0000000000 Binary files a/docs/experimental-features/images/host-risk-score-import-dashboard.png and /dev/null differ diff --git a/docs/experimental-features/images/host-score-overview.png b/docs/experimental-features/images/host-score-overview.png deleted file mode 100644 index 897603f7f8..0000000000 Binary files a/docs/experimental-features/images/host-score-overview.png and /dev/null differ diff 
--git a/docs/experimental-features/images/hosts-by-risk-details-page.png b/docs/experimental-features/images/hosts-by-risk-details-page.png deleted file mode 100644 index 7aca2d9346..0000000000 Binary files a/docs/experimental-features/images/hosts-by-risk-details-page.png and /dev/null differ diff --git a/docs/experimental-features/images/hrs-alerts-table.png b/docs/experimental-features/images/hrs-alerts-table.png deleted file mode 100644 index 7691cd8df9..0000000000 Binary files a/docs/experimental-features/images/hrs-alerts-table.png and /dev/null differ diff --git a/docs/experimental-features/images/hrs-all-hosts.png b/docs/experimental-features/images/hrs-all-hosts.png deleted file mode 100644 index dac02c56bb..0000000000 Binary files a/docs/experimental-features/images/hrs-all-hosts.png and /dev/null differ diff --git a/docs/experimental-features/images/hrs-overview-section.png b/docs/experimental-features/images/hrs-overview-section.png deleted file mode 100644 index 7b6bfc34f9..0000000000 Binary files a/docs/experimental-features/images/hrs-overview-section.png and /dev/null differ diff --git a/docs/experimental-features/images/kspm-1.png b/docs/experimental-features/images/kspm-1.png deleted file mode 100644 index c00479230b..0000000000 Binary files a/docs/experimental-features/images/kspm-1.png and /dev/null differ diff --git a/docs/experimental-features/images/kspm-2.png b/docs/experimental-features/images/kspm-2.png deleted file mode 100644 index 161bca094d..0000000000 Binary files a/docs/experimental-features/images/kspm-2.png and /dev/null differ diff --git a/docs/experimental-features/images/restart-hrs.png b/docs/experimental-features/images/restart-hrs.png deleted file mode 100644 index b323cc38f0..0000000000 Binary files a/docs/experimental-features/images/restart-hrs.png and /dev/null differ diff --git a/docs/experimental-features/images/restart-urs.png b/docs/experimental-features/images/restart-urs.png deleted file mode 100644 index 
9ff8f7c480..0000000000 Binary files a/docs/experimental-features/images/restart-urs.png and /dev/null differ diff --git a/docs/experimental-features/images/score-in-flyout.png b/docs/experimental-features/images/score-in-flyout.png deleted file mode 100644 index 5aef84bab3..0000000000 Binary files a/docs/experimental-features/images/score-in-flyout.png and /dev/null differ diff --git a/docs/experimental-features/images/select-hrs-dashboard.png b/docs/experimental-features/images/select-hrs-dashboard.png deleted file mode 100644 index 834cfe25a1..0000000000 Binary files a/docs/experimental-features/images/select-hrs-dashboard.png and /dev/null differ diff --git a/docs/experimental-features/images/select-urs-dashboard.png b/docs/experimental-features/images/select-urs-dashboard.png deleted file mode 100644 index 0f64faa16c..0000000000 Binary files a/docs/experimental-features/images/select-urs-dashboard.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-alerts-table.png b/docs/experimental-features/images/urs-alerts-table.png deleted file mode 100644 index 98bf66145e..0000000000 Binary files a/docs/experimental-features/images/urs-alerts-table.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-details-page.png b/docs/experimental-features/images/urs-details-page.png deleted file mode 100644 index d24cb5a8b5..0000000000 Binary files a/docs/experimental-features/images/urs-details-page.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-histogram.png b/docs/experimental-features/images/urs-histogram.png deleted file mode 100644 index fca00bc4e6..0000000000 Binary files a/docs/experimental-features/images/urs-histogram.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-overview-section.png b/docs/experimental-features/images/urs-overview-section.png deleted file mode 100644 index dccc4bf61e..0000000000 Binary files 
a/docs/experimental-features/images/urs-overview-section.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-score-flyout.png b/docs/experimental-features/images/urs-score-flyout.png deleted file mode 100644 index 9db5cb676a..0000000000 Binary files a/docs/experimental-features/images/urs-score-flyout.png and /dev/null differ diff --git a/docs/experimental-features/images/urs-table.png b/docs/experimental-features/images/urs-table.png deleted file mode 100644 index 8f8eabbe92..0000000000 Binary files a/docs/experimental-features/images/urs-table.png and /dev/null differ diff --git a/docs/experimental-features/images/users-by-risk-details-page.png b/docs/experimental-features/images/users-by-risk-details-page.png deleted file mode 100644 index 28c940deff..0000000000 Binary files a/docs/experimental-features/images/users-by-risk-details-page.png and /dev/null differ diff --git a/docs/experimental-features/images/usr-details-usr-risk-tab.png b/docs/experimental-features/images/usr-details-usr-risk-tab.png deleted file mode 100644 index 57a6d112b0..0000000000 Binary files a/docs/experimental-features/images/usr-details-usr-risk-tab.png and /dev/null differ diff --git a/docs/experimental-features/user-risk-score.asciidoc b/docs/experimental-features/user-risk-score.asciidoc deleted file mode 100644 index 3161f265dd..0000000000 --- a/docs/experimental-features/user-risk-score.asciidoc +++ /dev/null @@ -1,309 +0,0 @@ -[[user-risk-score]] -== User risk score - -NOTE: This page refers to the original user and host risk score modules. If you have the original modules installed, and you're running {stack} version 8.11 or newer, you can <>. -For information about the latest risk engine, refer to <>. - -NOTE: This feature is available for {stack} versions 8.3.0 and newer and requires a https://www.elastic.co/pricing[Platinum subscription] or higher. - -The user risk score feature highlights risky usernames in your environment. 
It utilizes a transform with a scripted metric aggregation to calculate user risk scores based on alerts generated within the past 90 days. The transform runs hourly to update scores as new alerts are generated. - -Each alert's contribution to the user risk score is based on the alert's risk score (`signal.rule.risk_score`). The risk score is calculated using a weighted sum where rules with higher time-corrected risk scores also have higher weights. Each risk score is normalized on a scale of 0 to 100. - -The following table shows how risk levels are applied to a username, based on the normalized risk score: - -[width="100%",options="header"] -|============================================== -|Risk level |User risk score - -|Unknown |< 20 -|Low |20-40 -|Moderate |40-70 -|High | 70-90 -|Critical | > 90 - -|============================================== - -[discrete] -[[deploy-user-risk-score]] -=== Enable user risk score - -You can enable user risk score from the following places in the {security-app}: - -* The Entity Analytics dashboard -* The *User risk* tab on the Users page -* The *User risk* tab on a user's details page - -Or, in {kib}, you can enable user risk score in Console. - -To enable user risk score from the Entity Analytics dashboard: - -. In the {security-app}, go to *Dashboards* -> *Entity Analytics*. -. In the User Risk Scores section, click *Enable* to install the module. - - -To enable user risk score from the Users page: - -. Go to *Explore* -> *Users*. -. Select the *User risk* tab, then click *Enable* to install the module. - -[role="screenshot"] -image::images/enable-urs.png[Enable User Risk score button] - -To enable user risk score from a user's details page: - -. Go to *Explore* -> *Users*. -. Select the *All users* tab, then click a user name. -. On the details page, scroll down to the data tables, then select the *User risk* tab. -. Click *Enable* to install the module. 
- -To enable user risk score from Console in {kib}, open a browser window and enter the following URL: - -[source,console] ----------------------------------- -{KibanaURL}/s/{spaceID}/app/dev_tools#/console?load_from={KibanaURL}/s/{spaceID}/internal/risk_score/prebuilt_content/dev_tool/enable_user_risk_score ----------------------------------- - -NOTE: If there's existing content in Console, scroll to the bottom to find the output loaded. - -TIP: If you receive an error message during the installation process, delete the user risk score module manually, and then re-enable it. Refer to <> for more information. - -[[upgrade-user-risk-score]] -[discrete] -=== Upgrade user risk score - -If you previously enabled user risk score and you're upgrading to {stack} version 8.11 or newer, you can <>. - -Before upgrading, note the following: - -* Since older data is not preserved, previous user risk scores will be deleted, and new scores will be created. However, if you want to retain old user risk scores, you can reindex them _before_ upgrading. To learn how, refer to {ref}/docs-reindex.html[Reindex API]. New data will be stored in the `ml_user_risk_score_` and `ml_user_risk_score_latest_` indices. - -* You must edit your {cloud}/ec-manage-kibana-settings.html#ec-manage-kibana-settings[{kib} user settings] and remove the `xpack.securitySolution.enableExperimental:['riskyUsersEnabled']` feature flag. - -After this is done, you can proceed with upgrading the user risk score feature from any of the following places in the {security-app}: - -* The Entity Analytics dashboard -* The *User risk* tab on the User page -* The *User risk* tab on a user's details page - -NOTE: After you enable or upgrade user risk score, you might get a message that says, "No user risk score data available to display." To verify that the transform that installs the user risk score module is picking up data, refer to <>. 
- -TIP: If you receive an error message during the installation process, delete the user risk score module manually, and then re-enable it. Refer to <> for more information. - -[[view-user-risk-score]] -[discrete] -=== Analyze user risk score data - -It is recommended you analyze users with the highest risk scores first -- those in the `Critical` and `Moderate` categories. User risk score data appears in the following places in the {security-app}: - -The `user.risk.calculated_level` column in the Alerts table: - -[role="screenshot"] -image::images/urs-alerts-table.png[User risk score in Alerts table] - -The *Insights* -> *Entities* section on the *Overview* tab within the alert details flyout - -[role="screenshot"] -image::images/urs-score-flyout.png[User risk score in alert details flyout,65%] - -The *User risk* tab on the Users page: - -[role="screenshot"] -image::images/users-by-risk-details-page.png[User risk score on Users risk tab] - -The Overview section on the user details page: - -[role="screenshot"] -image::images/urs-overview-section.png[User risk score in Overview section] - -The *User risk* tab on the user details page: - -[role="screenshot"] -image::images/usr-details-usr-risk-tab.png[User risk score on the user details page] - -You can also visualize user risk score data using prebuilt dashboards that are automatically imported when the feature is enabled. - -To access the dashboards: - -. In {kib}, go to *Analytics -> Dashboard*, then search for `risk score`. -. Select *Drilldown of User Risk Score* to analyze the risk components of a user, or *Current Risk Score for Users* to display a list of current risky users in your environment. - -In this example, we'll explore the *Drilldown of User Risk Score* dashboard. - -[role="screenshot"] -image::images/select-urs-dashboard.png[Select dashboard] - -The histogram shows historical changes in a particular user's risk score(s). 
To specify a date range, use the date and time picker, or drag and select a time range within the histogram. Click *View source dashboard* to view the top values of `user.name` and `risk.keyword`. - -[role="screenshot"] -image::images/urs-histogram.png[User risk score histogram] - -The data tables beneath the histogram display associated rules, users, and MITRE ATT&CK tactics seen for risky users. By default, the tables are sorted by risk, with the highest total risk scores at the top. Use this information to triage your highest risk users. - -[role="screenshot"] -image::images/dashboard.gif[User risk score dashboard] - -[[troubleshoot-user-risk-score]] -[discrete] -=== Troubleshooting - -During the installation or upgrade process, you may receive the following error messages: - -* `Saved object already exists` -* `Transform already exists` -* `Ingest pipeline already exists` - -In this case, we recommend that you manually delete the user risk score module, and then re-enable it. To manually delete the module: - -. Delete the user risk score saved objects: -.. From the {kib} main menu, go to **Stack Management** -> **Kibana** -> **Saved Objects**. -.. Delete the saved objects that have the `User Risk Score - ` tag. -+ -[role="screenshot"] -image::images/delete-urs-saved-objects.png[Delete user risk score saved objects] -.. Delete the `User Risk Score - ` tag. -+ -[role="screenshot"] -image::images/delete-urs-tag.png[Delete user risk score tag] -. Stop and delete the user risk score transforms. You can do this using the {kib} UI or the {ref}/stop-transform.html[Stop transform API] and {ref}/delete-transform.html[Delete transform API]. -** To delete the user risk score transforms using the {kib} UI: -.. From the {kib} main menu, go to **Stack Management** -> **Data** -> **Transforms**. -.. 
Stop the following transforms, then delete them: -*** `ml_userriskscore_latest_transform_` -*** `ml_userriskscore_pivot_transform_` -** To delete the user risk score transforms using the API, run the following commands in Console: -.. Stop and delete the latest transform: -+ -[source,console] ----------------------------------- -POST _transform/ml_userriskscore_latest_transform_/_stop -DELETE _transform/ml_userriskscore_latest_transform_ ----------------------------------- -.. Stop and delete the pivot transform: -+ -[source,console] ----------------------------------- -POST _transform/ml_userriskscore_pivot_transform_/_stop -DELETE _transform/ml_userriskscore_pivot_transform_ ----------------------------------- -. Delete the user risk score ingest pipeline. You can do this using the {kib} UI or the {ref}/delete-pipeline-api.html[Delete pipeline API]. -** To delete the user risk score ingest pipeline using the {kib} UI: -.. From the {kib} main menu, go to **Stack Management** -> **Ingest** -> **Ingest Pipelines**. -.. Delete the `ml_userriskscore_ingest_pipeline_` ingest pipeline. -** To delete the user risk score ingest pipeline using the Delete pipeline API, run the following command in Console: -+ -[source,console] ----------------------------------- -DELETE /_ingest/pipeline/ml_userriskscore_ingest_pipeline_ ----------------------------------- -. Delete the stored user risk score scripts using the {ref}/delete-stored-script-api.html[Delete stored script API]. In Console, run the following commands: -+ -[source,console] ----------------------------------- -DELETE _scripts/ml_userriskscore_levels_script_ -DELETE _scripts/ml_userriskscore_map_script_ -DELETE _scripts/ml_userriskscore_reduce_script_ ----------------------------------- - -After manually deleting the user risk score saved objects, transforms, ingest pipeline, and stored scripts, follow the steps to <>. 
- -[[verify-user-risk-score]] -=== Verify that user risk score data installed successfully (Optional) - -After you enable or upgrade user risk score, the following message may appear: - -[role="screenshot"] -image::images/restart-urs.png[Restart user risk score] - -If so, click *Restart* and allow at least an hour for the data to be generated. If data still doesn't appear, verify that user risk score data has been generated: - -In {kib}, run the following commands in Console to query the `ml_user_risk_score_` index: - -[source,console] ----------------------------------- -GET ml_user_risk_score_/_search ----------------------------------- - -If no data returns, you'll need to check if the alerts index (`.alerts-security.alerts-`) had alert data when `ml_userriskscore_pivot_transform_` was started. - -Example: - -[source,console] ----------------------------------- -GET transform/ml_userriskscore_pivot_transform_/_stats?human=true ----------------------------------- - -Here's an example response: - -[source,console] ----------------------------------- -{ - "count": 1, - "transforms": [ - { - "id": "ml_userriskscore_pivot_transform_", - "state": "started", - "node": { - "id": "H1tlwfTyRkWls-C0sarmHw", - "name": "instance-0000000000", - "ephemeral_id": "SBqlp5ywRuuop2gtcdCljA", - "transport_address": "10.43.255.164:19635", - "attributes": {} - }, - "stats": { - "pages_processed": 29, - "documents_processed": 11805, - "documents_indexed": 8, - "documents_deleted": 0, - "trigger_count": 9, - "index_time_in_ms": 52, - "index_total": 7, - "index_failures": 0, - "search_time_in_ms": 201, - "search_total": 29, - "search_failures": 0, - "processing_time_in_ms": 14, - "processing_total": 29, - "delete_time_in_ms": 0, - "exponential_avg_checkpoint_duration_ms": 59.02353261024906, - "exponential_avg_documents_indexed": 0.8762710605864747, - "exponential_avg_documents_processed": 1664.7724779548555 - }, - "checkpointing": { - "last": { - "checkpoint": 8, - "timestamp": 
"2022-10-17T14:49:50.315Z", - "timestamp_millis": 1666018190315, - "time_upper_bound": "2022-10-17T14:47:50.315Z", - "time_upper_bound_millis": 1666018070315 - }, - "operations_behind": 380, - "changes_last_detected_at_string": "2022-10-17T14:49:50.113Z", - "changes_last_detected_at": 1666018190113, - "last_search_time_string": "2022-10-17T14:49:50.113Z", - "last_search_time": 1666018190113 - } - } - ] -} ----------------------------------- - -Take note of the value from `time_upper_bound_millis` and enter it as a range query for the alerts index. - -Example: - -[source,console] ----------------------------------- -GET .alerts-security.alerts-/_search -{ - "query": { - "range": { - "@timestamp": { - "lt": 1666018070315 - } - } - } -} ----------------------------------- - -If there's no response, verify that relevant <> are running and that alert data is being generated. If there is a response, click *Restart* and allow an hour for the user risk data to appear. \ No newline at end of file diff --git a/docs/getting-started/users-page.asciidoc b/docs/getting-started/users-page.asciidoc index 35ba5fb8a7..f7df997574 100644 --- a/docs/getting-started/users-page.asciidoc +++ b/docs/getting-started/users-page.asciidoc @@ -24,7 +24,7 @@ Beneath the KPI charts are data tables, which are useful for viewing and investi * *All users*: A chronological list of unique user names, when they were last active, and the associated domains. * *Authentications*: A chronological list of user authentication events and associated details, such as the number of successes and failures, and the host name of the last successful destination. * *Anomalies*: Unusual activity discovered by machine learning jobs that contain user data. -* *User risk*: The latest recorded user risk score for each user, and its user risk classification. This feature requires a https://www.elastic.co/pricing[Platinum subscription] or higher and must be enabled to display the data. 
Click *Enable* on the *User risk* tab to get started. To learn more, refer to our <>. +* *User risk*: The latest recorded user risk score for each user, and its user risk classification. This feature requires a https://www.elastic.co/pricing[Platinum subscription] or higher and must be enabled to display the data. Click *Enable* on the *User risk* tab to get started. To learn more, refer to our <>. The Events table includes inline actions and several customization options. To learn more about what you can do with the data in these tables, refer to <>. diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 6791f36c90..0194e513d9 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -46,8 +46,6 @@ include::reference/ref-index.asciidoc[] include::troubleshooting/troubleshooting-intro.asciidoc[] -include::experimental-features/experimental-features-intro.asciidoc[] - include::release-notes.asciidoc[] include::detections/prebuilt-rules/downloadable-packages/0-13-1/prebuilt-rules-0-13-1-appendix.asciidoc[] diff --git a/docs/management/hosts/hosts-overview.asciidoc b/docs/management/hosts/hosts-overview.asciidoc index d50ecfc63d..b8d12049cb 100644 --- a/docs/management/hosts/hosts-overview.asciidoc +++ b/docs/management/hosts/hosts-overview.asciidoc @@ -26,7 +26,7 @@ Beneath the KPI charts are data tables, categorized by individual tabs, which ar * *All hosts*: High-level host details. * *Uncommon processes*: Uncommon processes running on hosts. * *Anomalies*: Anomalies discovered by machine learning jobs. -* *Host risk*: The latest recorded host risk score for each host, and its host risk classification. This feature requires a https://www.elastic.co/pricing[Platinum subscription] or higher and must be enabled to display the data. Click *Enable* on the *Host risk* tab to get started. To learn more, refer to our <>. +* *Host risk*: The latest recorded host risk score for each host, and its host risk classification. 
This feature requires a https://www.elastic.co/pricing[Platinum subscription] or higher and must be enabled to display the data. Click *Enable* on the *Host risk* tab to get started. To learn more, refer to our <>. * *Sessions*: Linux process events that you can open in <>, an investigation tool that allows you to examine Linux process data at a hierarchal level. The tables within the *Events* and *Sessions* tabs include inline actions and several customization options. To learn more about what you can do with the data in these tables, refer to <>. diff --git a/docs/release-notes/8.14.asciidoc b/docs/release-notes/8.14.asciidoc index 477d12c701..110108aff8 100644 --- a/docs/release-notes/8.14.asciidoc +++ b/docs/release-notes/8.14.asciidoc @@ -1,6 +1,16 @@ [[release-notes-header-8.14.0]] == 8.14 +[discrete] +[[release-notes-8.14.3]] +=== 8.14.3 + +[discrete] +[[bug-fixes-8.14.3]] +==== Bug fixes + +* Fixes a bug that prevented widgets on the Alerts page from updating after the status of alerts grouped by `rule.name` was changed with a bulk action ({kibana-pull}183674[#183674]). 
+ [discrete] [[release-notes-8.14.2]] === 8.14.2 diff --git a/docs/serverless/assistant/ai-assistant-alert-triage.mdx b/docs/serverless/AI-for-security/ai-assistant-alert-triage.mdx similarity index 100% rename from docs/serverless/assistant/ai-assistant-alert-triage.mdx rename to docs/serverless/AI-for-security/ai-assistant-alert-triage.mdx diff --git a/docs/serverless/assistant/ai-assistant-esql-queries.mdx b/docs/serverless/AI-for-security/ai-assistant-esql-queries.mdx similarity index 100% rename from docs/serverless/assistant/ai-assistant-esql-queries.mdx rename to docs/serverless/AI-for-security/ai-assistant-esql-queries.mdx diff --git a/docs/serverless/assistant/ai-assistant.mdx b/docs/serverless/AI-for-security/ai-assistant.mdx similarity index 100% rename from docs/serverless/assistant/ai-assistant.mdx rename to docs/serverless/AI-for-security/ai-assistant.mdx diff --git a/docs/serverless/AI-for-security/ai-for-security-landing-pg.mdx b/docs/serverless/AI-for-security/ai-for-security-landing-pg.mdx new file mode 100644 index 0000000000..b4a5b206ac --- /dev/null +++ b/docs/serverless/AI-for-security/ai-for-security-landing-pg.mdx @@ -0,0 +1,8 @@ +--- +slug: /serverless/security/ai-for-security +title: AI for security +description: Learn about Elastic's native AI security tools. +tags: [ 'serverless', 'security', 'overview', 'LLM', 'artificial intelligence' ] +status: in review +--- +You can use ((elastic-sec))’s built-in AI tools to speed up your work and augment your team’s capabilities. The pages in this section describe , which answers questions and enhances your workflows throughout Elastic Security, and , which speeds up the triage process by finding patterns and identifying attacks spanning multiple alerts. 
\ No newline at end of file diff --git a/docs/serverless/assistant/ai-use-cases.mdx b/docs/serverless/AI-for-security/ai-use-cases.mdx similarity index 100% rename from docs/serverless/assistant/ai-use-cases.mdx rename to docs/serverless/AI-for-security/ai-use-cases.mdx diff --git a/docs/serverless/attack-discovery/attack-discovery.mdx b/docs/serverless/AI-for-security/attack-discovery.mdx similarity index 90% rename from docs/serverless/attack-discovery/attack-discovery.mdx rename to docs/serverless/AI-for-security/attack-discovery.mdx index 6e910f541e..1603aea9ae 100644 --- a/docs/serverless/attack-discovery/attack-discovery.mdx +++ b/docs/serverless/AI-for-security/attack-discovery.mdx @@ -41,7 +41,13 @@ While Attack discovery is compatible with many different models, our testing fou 3. Once you've selected a connector, click **Generate** to start the analysis. -It may take from a few seconds up to several minutes to generate discoveries, depending on the number of alerts and the model you selected. Note that Attack discovery is in technical preview and will only analyze opened and acknowleged alerts from the past 24 hours. +It may take from a few seconds up to several minutes to generate discoveries, depending on the number of alerts and the model you selected. + + +Attack discovery is in technical preview and will only analyze opened and acknowledged alerts from the past 24 hours. By default it only analyzes up to 20 alerts within this timeframe, but you can expand this up to 100 by going to **AI Assistant → Settings () → Knowledge Base** and updating the **Alerts** setting. 
+ + +![AI Assistant knowledge base menu](../images/ai-assistant/assistant-kb-menu.png) diff --git a/docs/serverless/assistant/connect-to-azure-openai.mdx b/docs/serverless/AI-for-security/connect-to-azure-openai.mdx similarity index 100% rename from docs/serverless/assistant/connect-to-azure-openai.mdx rename to docs/serverless/AI-for-security/connect-to-azure-openai.mdx diff --git a/docs/serverless/assistant/connect-to-bedrock.mdx b/docs/serverless/AI-for-security/connect-to-bedrock.mdx similarity index 100% rename from docs/serverless/assistant/connect-to-bedrock.mdx rename to docs/serverless/AI-for-security/connect-to-bedrock.mdx diff --git a/docs/serverless/AI-for-security/connect-to-byo-llm.mdx b/docs/serverless/AI-for-security/connect-to-byo-llm.mdx new file mode 100644 index 0000000000..ccbb6e3cec --- /dev/null +++ b/docs/serverless/AI-for-security/connect-to-byo-llm.mdx @@ -0,0 +1,174 @@ +--- +slug: /serverless/security/connect-to-byo-llm +title: Connect to your own local LLM +description: Set up a connector to LM Studio so you can use a local model with AI Assistant. +tags: ["security", "overview", "get-started"] +status: in review +--- + +This page provides instructions for setting up a connector to a large language model (LLM) of your choice using LM Studio. This allows you to use your chosen model within ((elastic-sec)). You'll first need to set up a reverse proxy to communicate with ((elastic-sec)), then set up LM Studio on a server, and finally configure the connector in your ((elastic-sec)) project. [Learn more about the benefits of using a local LLM](https://www.elastic.co/blog/ai-assistant-locally-hosted-models). 
+ +This example uses a single server hosted in GCP to run the following components: +- LM Studio with the [Mixtral-8x7b](https://mistral.ai/technology/#models) model +- A reverse proxy using Nginx to authenticate to Elastic Cloud + + + + + +For testing, you can use alternatives to Nginx such as [Azure Dev Tunnels](https://learn.microsoft.com/en-us/azure/developer/dev-tunnels/overview) or [Ngrok](https://ngrok.com/), but using Nginx makes it easy to collect additional telemetry and monitor its status by using Elastic's native Nginx integration. While this example uses cloud infrastructure, it could also be replicated locally without an internet connection. + + +## Configure your reverse proxy + + +If your Elastic instance is on the same host as LM Studio, you can skip this step. + + +You need to set up a reverse proxy to enable communication between LM Studio and Elastic. For more complete instructions, refer to a guide such as [this one](https://www.digitalocean.com/community/tutorials/how-to-configure-nginx-as-a-reverse-proxy-on-ubuntu-22-04). 
+ +The following is an example Nginx configuration file: +``` +server { + listen 80; + listen [::]:80; + server_name ; + server_tokens off; + add_header x-xss-protection "1; mode=block" always; + add_header x-frame-options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + return 301 https://$server_name$request_uri; +} + +server { + + listen 443 ssl http2; + listen [::]:443 ssl http2; + server_name ; + server_tokens off; + ssl_certificate /etc/letsencrypt/live//fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live//privkey.pem; + ssl_session_timeout 1d; + ssl_session_cache shared:SSL:50m; + ssl_session_tickets on; + ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256'; + ssl_protocols TLSv1.3 TLSv1.2; + ssl_prefer_server_ciphers on; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; + add_header x-xss-protection "1; mode=block" always; + add_header x-frame-options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header Referrer-Policy "strict-origin-when-cross-origin" always; + ssl_stapling on; + ssl_stapling_verify on; + ssl_trusted_certificate /etc/letsencrypt/live//fullchain.pem; + resolver 1.1.1.1; + location / { + + if ($http_authorization != "Bearer ") { + return 401; +} + + proxy_pass http://localhost:1234/; + } + +} +``` + + +* Replace `` with your actual token, and keep it safe since you'll need it to set up the ((elastic-sec)) connector. +* Replace `` with your actual domain name. +* Update the `proxy_pass` value at the bottom of the configuration if you decide to change the port number in LM Studio to something other than 1234. 
+ + +### (Optional) Set up performance monitoring for your reverse proxy +You can use Elastic's [Nginx integration](https://www.elastic.co/docs/current/integrations/nginx) to monitor performance and populate monitoring dashboards in the ((security-app)). + +## Configure LM Studio and download a model + +First, install [LM Studio](https://lmstudio.ai/). LM Studio supports the OpenAI SDK, which makes it compatible with Elastic's OpenAI connector, allowing you to connect to any model available in the LM Studio marketplace. + +One current limitation of LM Studio is that when it is installed on a server, you must launch the application using its GUI before doing so using the CLI. For example, by using Chrome RDP with an [X Window System](https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine). After you've opened the application the first time using the GUI, you can start it by using `sudo lms server start` in the CLI. + +Once you've launched LM Studio: + +1. Go to LM Studio's Search window. +1. Search for an LLM (for example, `Mixtral-8x7B-instruct`). Your chosen model must include `instruct` in its name in order to work with Elastic. +1. Filter your search for "Compatibility Guess" to optimize results for your hardware. Results will be color coded: + * Green means "Full GPU offload possible", which yields the best results. + * Blue means "Partial GPU offload possible", which may work. + * Red for "Likely too large for this machine", which typically will not work. +1. Download one or more models. + + +For security reasons, before downloading a model, verify that it is from a trusted source. It can be helpful to review community feedback on the model (for example using a site like Hugging Face). + + + + +In this example we used [`TheBloke/Mixtral-8x7B-Instruct-v0.1.Q3_K_M.gguf`](https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF). 
It has 46.7B total parameters, a 32,000 token context window, and uses GGUF [quantization](https://huggingface.co/docs/transformers/main/en/quantization/overview). For more information about model names and format information, refer to the following table. + +| Model Name | Parameter Size | Tokens/Context Window | Quantization Format | +|------------|----------------|-----------------------|---------------------| +| Name of model, sometimes with a version number. | LLMs are often compared by their number of parameters — higher numbers mean more powerful models. | Tokens are small chunks of input information. Tokens do not necessarily correspond to characters. You can use [Tokenizer](https://platform.openai.com/tokenizer) to see how many tokens a given prompt might contain. | Quantization reduces overall parameters and helps the model to run faster, but reduces accuracy. | +| Examples: Llama, Mistral, Phi-3, Falcon. | The number of parameters is a measure of the size and the complexity of the model. The more parameters a model has, the more data it can process, learn from, generate, and predict. | The context window defines how much information the model can process at once. If the number of input tokens exceeds this limit, input gets truncated. | Specific formats for quantization vary, most models now support GPU rather than CPU offloading. | + + +## Load a model in LM Studio + +After downloading a model, load it in LM Studio using the GUI or LM Studio's [CLI tool](https://lmstudio.ai/blog/lms). + +### Option 1: load a model using the CLI (Recommended) + +It is a best practice to download models from the marketplace using the GUI, and then load or unload them using the CLI. The GUI allows you to search for models, whereas the CLI only allows you to import specific paths, but the CLI provides a good interface for loading and unloading. + +Use the following commands in your CLI: + +1. Verify LM Studio is installed: `lms` +2. 
Check LM Studio's status: `lms status` +3. List all downloaded models: `lms ls` +4. Load a model: `lms load` + + + +After the model loads, you should see a `Model loaded successfully` message in the CLI. + + + +To verify which model is loaded, use the `lms ps` command. + + + +If your model uses NVIDIA drivers, you can check the GPU performance with the `sudo nvidia-smi` command. + +### Option 2: load a model using the GUI + +Refer to the following video to see how to load a model using LM Studio's GUI. You can change the **port** setting, which is referenced in the Nginx configuration file. Note that the **GPU offload** was set to **Max**. + + + +## (Optional) Collect logs using Elastic's Custom Logs integration + +You can monitor the performance of the host running LM Studio using Elastic's [Custom Logs integration](https://www.elastic.co/docs/current/integrations/log). This can also help with troubleshooting. Note that the default path for LM Studio logs is `/tmp/lmstudio-server-log.txt`, as in the following screenshot: + + + +## Configure the connector in ((elastic-sec)) + +Finally, configure the connector in your Security project: + +1. Log in to your Security project. +2. Navigate to **Stack Management → Connectors → Create Connector → OpenAI**. The OpenAI connector enables this use case because LM Studio uses the OpenAI SDK. +3. Name your connector to help keep track of the model version you are using. +4. Under **URL**, enter the domain name specified in your Nginx configuration file, followed by `/v1/chat/completions`. +5. Under **Default model**, enter `local-model`. +6. Under **API key**, enter the secret token specified in your Nginx configuration file. +7. Click **Save**. + + + +Setup is now complete. You can use the model you've loaded in LM Studio to power Elastic's generative AI features. You can test a variety of models as you interact with AI Assistant to see what works best without having to update your connector. 
+ + +While local models work well for , we recommend you use one of for interacting with . As local models become more performant over time, this is likely to change. + \ No newline at end of file diff --git a/docs/serverless/assistant/connect-to-openai.mdx b/docs/serverless/AI-for-security/connect-to-openai.mdx similarity index 100% rename from docs/serverless/assistant/connect-to-openai.mdx rename to docs/serverless/AI-for-security/connect-to-openai.mdx diff --git a/docs/serverless/assistant/connect-to-vertex.mdx b/docs/serverless/AI-for-security/connect-to-vertex.mdx similarity index 100% rename from docs/serverless/assistant/connect-to-vertex.mdx rename to docs/serverless/AI-for-security/connect-to-vertex.mdx diff --git a/docs/serverless/assistant/images/attck-disc-11-alerts-disc.png b/docs/serverless/AI-for-security/images/attck-disc-11-alerts-disc.png similarity index 100% rename from docs/serverless/assistant/images/attck-disc-11-alerts-disc.png rename to docs/serverless/AI-for-security/images/attck-disc-11-alerts-disc.png diff --git a/docs/serverless/assistant/images/attck-disc-esql-query-gen-example.png b/docs/serverless/AI-for-security/images/attck-disc-esql-query-gen-example.png similarity index 100% rename from docs/serverless/assistant/images/attck-disc-esql-query-gen-example.png rename to docs/serverless/AI-for-security/images/attck-disc-esql-query-gen-example.png diff --git a/docs/serverless/AI-for-security/images/lms-cli-welcome.png b/docs/serverless/AI-for-security/images/lms-cli-welcome.png new file mode 100644 index 0000000000..c857d01454 Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-cli-welcome.png differ diff --git a/docs/serverless/AI-for-security/images/lms-custom-logs-config.png b/docs/serverless/AI-for-security/images/lms-custom-logs-config.png new file mode 100644 index 0000000000..35e82e89cd Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-custom-logs-config.png differ diff --git 
a/docs/serverless/AI-for-security/images/lms-edit-connector.png b/docs/serverless/AI-for-security/images/lms-edit-connector.png new file mode 100644 index 0000000000..0359253eb1 Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-edit-connector.png differ diff --git a/docs/serverless/AI-for-security/images/lms-model-select.png b/docs/serverless/AI-for-security/images/lms-model-select.png new file mode 100644 index 0000000000..454fa2a1ab Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-model-select.png differ diff --git a/docs/serverless/AI-for-security/images/lms-ps-command.png b/docs/serverless/AI-for-security/images/lms-ps-command.png new file mode 100644 index 0000000000..af72b6976c Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-ps-command.png differ diff --git a/docs/serverless/AI-for-security/images/lms-studio-arch-diagram.png b/docs/serverless/AI-for-security/images/lms-studio-arch-diagram.png new file mode 100644 index 0000000000..4b737bbb7c Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-studio-arch-diagram.png differ diff --git a/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png b/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png new file mode 100644 index 0000000000..c2e3ec8114 Binary files /dev/null and b/docs/serverless/AI-for-security/images/lms-studio-model-loaded-msg.png differ diff --git a/docs/serverless/assistant/llm-connector-guides.mdx b/docs/serverless/AI-for-security/llm-connector-guides.mdx similarity index 90% rename from docs/serverless/assistant/llm-connector-guides.mdx rename to docs/serverless/AI-for-security/llm-connector-guides.mdx index fcdedd575a..31036a8376 100644 --- a/docs/serverless/assistant/llm-connector-guides.mdx +++ b/docs/serverless/AI-for-security/llm-connector-guides.mdx @@ -14,4 +14,5 @@ Setup guides are available for the following LLM providers: * * * +* diff --git 
a/docs/serverless/assistant/llm-performance-matrix.mdx b/docs/serverless/AI-for-security/llm-performance-matrix.mdx similarity index 100% rename from docs/serverless/assistant/llm-performance-matrix.mdx rename to docs/serverless/AI-for-security/llm-performance-matrix.mdx diff --git a/docs/serverless/assistant/usecase-attack-disc-ai-assistant-incident-reporting.mdx b/docs/serverless/AI-for-security/usecase-attack-disc-ai-assistant-incident-reporting.mdx similarity index 100% rename from docs/serverless/assistant/usecase-attack-disc-ai-assistant-incident-reporting.mdx rename to docs/serverless/AI-for-security/usecase-attack-disc-ai-assistant-incident-reporting.mdx diff --git a/docs/serverless/cloud-native-security/d4c-get-started.mdx b/docs/serverless/cloud-native-security/d4c-get-started.mdx index e1b7be3aa6..baed5754df 100644 --- a/docs/serverless/cloud-native-security/d4c-get-started.mdx +++ b/docs/serverless/cloud-native-security/d4c-get-started.mdx @@ -7,6 +7,9 @@ status: in review --- + + +
This page describes how to set up Cloud Workload Protection (CWP) for Kubernetes. diff --git a/docs/serverless/cloud-native-security/d4c-overview.mdx b/docs/serverless/cloud-native-security/d4c-overview.mdx index e7db10007b..9cfc66c674 100644 --- a/docs/serverless/cloud-native-security/d4c-overview.mdx +++ b/docs/serverless/cloud-native-security/d4c-overview.mdx @@ -7,6 +7,9 @@ status: in review --- + + +
Elastic Cloud Workload Protection (CWP) for Kubernetes provides cloud-native runtime protections for containerized environments by identifying and optionally blocking unexpected system behavior in Kubernetes containers. diff --git a/docs/serverless/serverless-security.docnav.json b/docs/serverless/serverless-security.docnav.json index d56e4e5b22..07682a08d4 100644 --- a/docs/serverless/serverless-security.docnav.json +++ b/docs/serverless/serverless-security.docnav.json @@ -18,15 +18,17 @@ { "slug": "/serverless/security/security-ui", "classic-sources": [ "enSecurityEsUiOverview" ] - }, - { - "slug": "/serverless/security/attack-discovery" - }, + }, { - "label": "AI Assistant", - "slug": "/serverless/security/ai-assistant", - "classic-sources": [ "enSecuritySecurityAssistant" ], + "label": "AI for security", + "slug": "/serverless/security/ai-for-security", "items": [ + { + "slug": "/serverless/security/ai-assistant" + }, + { + "slug": "/serverless/security/attack-discovery" + }, { "slug": "/serverless/security/llm-connector-guides", "items": [ @@ -41,6 +43,9 @@ }, { "slug": "/serverless/security/connect-to-google-vertex" + }, + { + "slug": "/serverless/security/connect-to-byo-llm" } ] },