diff --git a/.gitignore b/.gitignore
index eaf5a0f9..98366f36 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@
 eeauditor/processor/outputs/*.svg
 eeauditor/processor/outputs/*.html
 LOCAL_external_providers.toml
+output.json
+output_ocsf_v1-4-0_events.json
diff --git a/Dockerfile b/Dockerfile
index 6fdb6b8c..2151777e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -30,7 +30,7 @@ COPY requirements-docker.txt /tmp/requirements-docker.txt
 RUN \
     apk update && \
     apk add --no-cache python3 postgresql-libs && \
-    apk add --no-cache --virtual .build-deps gcc zlib-dev python3-dev musl-dev postgresql-dev && \
+    apk add --no-cache --virtual .build-deps g++ gcc zlib-dev python3-dev musl-dev postgresql-dev && \
    python3 -m venv /opt/venv && \
    source /opt/venv/bin/activate && \
    python3 -m ensurepip && \
diff --git a/README.md b/README.md
index 8c5530aa..2f50af52 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ ElectricEye is a multi-cloud, multi-SaaS Python CLI tool for Asset Management, S
 ## Workflow
 
-![Architecture](./screenshots/ElectricEyeAnimated.gif)
+![Architecture](./screenshots/electrice_eye_architecture.jpg)
 
 ## Quick Run Down :running: :running:
 
@@ -74,37 +74,39 @@
 python3 eeauditor/controller.py --help
 Usage: controller.py [OPTIONS]
 
 Options:
-  -t, --target-provider [AWS|Azure|OCI|GCP|Servicenow|M365|Salesforce]
-                        CSP or SaaS Vendor Assessment Target, ensure
-                        that any -a or -c arg maps to your target
-                        provider e.g., -t AWS -a
-                        Amazon_APGIW_Auditor
-  -a, --auditor-name TEXT  Specify which Auditor you want to run by
-                        using its name NOT INCLUDING .py. Defaults
-                        to ALL Auditors
-  -c, --check-name TEXT  A specific Check in a specific Auditor you
+  -t, --target-provider [AWS|Azure|OCI|GCP|Servicenow|M365|Salesforce|Snowflake]
+                        Public cloud or SaaS assessment target,
+                        ensure that any -a or -c arg maps to your
+                        target provider to avoid any errors. e.g.,
+                        -t AWS -a Amazon_APIGW_Auditor
+  -a, --auditor-name TEXT  Specify which Auditor you want to run by
+                        using its name NOT INCLUDING .py. Use the
+                        --list-checks arg to receive a list.
+                        Defaults to ALL Auditors
+  -c, --check-name TEXT  A specific Check in a specific Auditor you
                         want to run, this correlates to the function
-                        name. Defaults to ALL Checks
-  -d, --delay INTEGER   Time in seconds to sleep between Auditors
-                        being ran, defaults to 0
-  -o, --outputs TEXT    A list of Outputs (files, APIs, databases,
-                        ChatOps) to send ElectricEye Findings,
-                        specify multiple with additional arguments:
-                        -o csv -o postgresql -o slack  [default:
-                        stdout]
-  --output-file TEXT    For file outputs such as JSON and CSV, the
-                        name of the file, DO NOT SPECIFY .file_type
+                        name. Use the --list-checks arg to receive a
+                        list. Defaults to ALL Checks
+  -d, --delay INTEGER   Time in seconds to sleep between Auditors
+                        being run, defaults to 0. Use this argument
+                        to avoid rate limiting
+  -o, --outputs TEXT    A list of Outputs (files, APIs, databases,
+                        ChatOps) to send ElectricEye Findings,
+                        specify multiple with additional arguments:
+                        -o csv -o postgresql -o slack  [default:
+                        ocsf_stdout]
+  -of, --output-file TEXT  For file outputs such as JSON and CSV, the
+                        name of the file, DO NOT SPECIFY .file_type
                         [default: output]
-  --list-options        Lists all valid Output options
-  --list-checks         Prints a table of Auditors, Checks, and
-                        Check descriptions to stdout - use this for
-                        -a or -c args
-  --create-insights     Create AWS Security Hub Insights for
-                        ElectricEye.
This only needs to be done once - per Account per Region for Security Hub - --list-controls Lists all ElectricEye Controls (e.g. Check - Titles) for an Assessment Target - --toml-path TEXT The full path to the TOML file used for + -lo, --list-options Lists all valid Output options + -lch, --list-checks Prints a table of Auditors, Checks, and + Check descriptions to stdout - use this + command for help with populating -a (Auditor + selection) or -c (Check selection) args + -lco, --list-controls Lists all ElectricEye controls - that is to + say: the Check Titles - for an Assessment + Target + -tp, --toml-path TEXT The full path to the TOML file used for configure e.g., ~/path/to/mydir/external_providers.toml. If this value is not provided the default path @@ -135,11 +137,11 @@ The following Cloud Service Providers are on the Roadmap - [For ServiceNow](./docs/setup/Setup_ServiceNow.md) - [For Microsoft M365](./docs/setup/Setup_M365.md) - [For Salesforce](./docs/setup/Setup_Salesforce.md) +- [For Snowflake](./docs/setup/Setup_Snowflake.md) The following SaaS Providers are on the Roadmap - [For Google Workspaces (*Coming Soon*)](./docs/setup/Setup_Google_Workspaces.md) -- [For Snowflake (*Coming Soon*)](./docs/setup/Setup_Snowflake.md) ## Cloud Asset Management (CAM) @@ -150,10 +152,10 @@ For more information on ElectricEye's CAM concept of operations and schema, refe In total there are: - **4** Supported Public CSPs: `AWS`, `GCP`, `OCI`, and `Azure` -- **3** Supported SaaS Providers: `ServiceNow`, `M365`, and `Salesforce` -- **1172** ElectricEye Checks -- **174** Supported CSP & SaaS Asset Components across all Services -- **131** ElectricEye Auditors +- **4** Supported SaaS Providers: `ServiceNow`, `M365`, `Salesforce`, and `Snowflake` +- **1193** ElectricEye Checks +- **177** Supported CSP & SaaS Asset Components across all Services +- **133** ElectricEye Auditors The tables of supported Services and Checks have been migrated to the respective per-Provider setup documentation linked above in [Configuring ElectricEye](#configuring-electriceye). 
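As a quick sanity check of the renamed flags in the help text above, an invocation might look like the following sketch; it assumes an already-configured environment, and the check name is the same one used later in the Outputs documentation.

```bash
# Enumerate valid Output options with the new -lo alias for --list-options
python3 eeauditor/controller.py -lo

# Run a single AWS check, writing JSON to ./my_findings.json with the new -of alias;
# per the help text, do not include a file extension in the -of value
python3 eeauditor/controller.py -t AWS -c ebs_volume_encryption_check -o json -of my_findings
```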
diff --git a/docs/outputs/OUTPUTS.md b/docs/outputs/OUTPUTS.md index 0b0a8635..7d5145f3 100644 --- a/docs/outputs/OUTPUTS.md +++ b/docs/outputs/OUTPUTS.md @@ -13,13 +13,13 @@ This documentation is all about Outputs supported by ElectricEye and how to conf - [Normalized JSON Output](#json-normalized-output) - [Cloud Asset Management JSON Output](#json-cloud-asset-management-cam-output) - [Open Cyber Security Format (OCSF) V1.1.0 Output](#open-cyber-security-format-ocsf-v110-output) +- [Open Cyber Security Format (OCSF) V1.4.0 Output](#open-cyber-security-format-ocsf-v140-output) - [CSV Output](#csv-output) - [AWS Security Hub Output](#aws-security-hub-output) - [MongoDB & AWS DocumentDB Output](#mongodb--aws-documentdb-output) - [Cloud Asset Management MongoDB & AWS DocumentDB Output](#mongodb--aws-documentdb-cloud-asset-management-cam-output) - [PostgreSQL Output](#postgresql-output) - [Cloud Asset Management PostgreSQL Output](#postgresql-cloud-asset-management-cam-output) -- [Firemon Cloud Defense (DisruptOps) Output](#firemon-cloud-defense-disruptops-output) - [Amazon Simple Queue Service (SQS) Output](#amazon-simple-queue-service-sqs-output) - [Slack Output](#slack-output) - [Open Cybersecurity Format (OCSF) -> Amazon Kinesis Data Firehose](#open-cybersecurity-format-ocsf---amazon-kinesis-data-firehose) @@ -34,7 +34,7 @@ To review the list of possible Output providers, use the following ElectricEye c ```bash $ python3 eeauditor/controller.py --list-options -['amazon_sqs', 'cam_json', 'cam_mongodb', 'cam_postgresql', 'csv', 'firemon_cloud_defense', 'html', 'html_compliance', 'json', 'json_normalized', 'mongodb', 'ocsf_kdf', 'ocsf_stdout', 'ocsf_v1_1_0', 'postgresql', 'sechub', 'slack', 'stdout'] +['amazon_sqs', 'cam_json', 'cam_mongodb', 'cam_postgresql', 'csv', 'html', 'html_compliance', 'json', 'json_normalized', 'mongodb', 'ocsf_kdf', 'ocsf_stdout', 'ocsf_v1_1_0', 'ocsf_v1_4_0', 'postgresql', 'sechub', 'slack', 'stdout'] ``` #### IMPORTANT NOTE!! You can specify multiple Outputs by providing the `-o` or `--outputs` argument multiple times, for instance: `python3 eeauditor/controller.py -t AWS -o json -o csv -o postgresql` @@ -104,193 +104,12 @@ For example, if you just want to have a "pretty-printed" JSON output you could u $ python3 eeauditor/controller.py -t AWS -c ebs_volume_encryption_check -o ocsf_stdout | grep 'SchemaVersion' | jq . -r ``` -The OCSF V1.1.0 Output selection will convert all ElectricEye findings into the OCSF format (in JSON) which is a normalized and standardized security-centric data model, well-suited to ingestion in Data Lakes and Data Lake Houses built upon Amazon Security Lake, AWS Glue Data Catalog, Snowflake, Apache Iceberg, Google BigQuery, and more. The Event Class used for this finding is [`compliance_finding [2003]`](https://schema.ocsf.io/1.1.0/classes/compliance_finding?extensions=) +The OCSF V1.4.0 Output selection will convert all ElectricEye findings into the OCSF format (in JSON) which is a normalized and standardized security-centric data model, well-suited to ingestion in Data Lakes and Data Lake Houses built upon Amazon Security Lake, AWS Glue Data Catalog, Snowflake, Apache Iceberg, Google BigQuery, and more. The Event Class used for this finding is [`compliance_finding [2003]`](https://schema.ocsf.io/1.4.0/classes/compliance_finding?extensions=) This Output will provide the `ProductFields.AssetDetails` information. 
To use this Output include the following arguments in your ElectricEye CLI: `python3 eeauditor/controller.py {..args..} -o ocsf_stdout` you can also choose to *not* specify `-o` at all as it is the default Output. -### OCSF `stdout` Output - -```json -{ - "activity_id": 1, - "activity_name": "Create", - "category_name": "Findings", - "category_uid": 2, - "class_name": "Compliance Finding", - "class_uid": 2003, - "confidence_score": 99, - "severity": "Medium", - "severity_id": 99, - "status": "New", - "status_id": 1, - "time": 1709090374, - "type_name": "Compliance Finding: Create", - "type_uid": 200301, - "metadata": { - "uid": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/Databases/azure-defender-for-cloud-databases-plan-enabled-check", - "correlation_uid": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/Databases/azure-defender-for-cloud-databases-plan-enabled-check", - "version": "1.1.0", - "product": { - "name": "ElectricEye", - "version": "3.0", - "url_string": "https://github.com/jonrau1/ElectricEye", - "vendor_name": "ElectricEye" - }, - "profiles": [ - "cloud" - ] - }, - "cloud": { - "provider": "Azure", - "region": "azure-global", - "account": { - "uid": "0000aaa-1234-bbb-dddd-example123", - "type": "Azure", - "type_uid": 99 - } - }, - "observables": [ - { - "name": "cloud.account.uid", - "type": "Resource UID", - "type_id": 10, - "value": "0000aaa-1234-bbb-dddd-example123" - }, - { - "name": "resource.uid", - "type": "Resource UID", - "type_id": 10, - "value": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/Databases" - } - ], - "compliance": { - "requirements": [ - "AICPA TSC CC7.2", - "CIS Critical Security Controls V8 8.11", - "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.3", - "CMMC 2.0 AU.L2-3.3.5", - "CSA Cloud Controls Matrix V4.0 LOG-05", - "CSA Cloud Controls Matrix V4.0 LOG-13", - "Equifax SCF V1.0 CM-CS-14", - "FBI CJIS Security Policy V5.9 5.3.2.1", - "FBI CJIS Security Policy V5.9 5.3.2.2", - "FBI CJIS Security Policy V5.9 5.3.4", - "FBI CJIS Security Policy V5.9 5.4.1", - "FBI CJIS Security Policy V5.9 5.4.3", - "HIPAA Security Rule 45 CFR Part 164 Subpart C 164.308(a)(1)(ii)(D)", - "HIPAA Security Rule 45 CFR Part 164 Subpart C 164.312(b)", - "ISO 27001:2013 A.12.4.1", - "ISO 27001:2013 A.16.1.1", - "ISO 27001:2013 A.16.1.4", - "ISO 27001:2022 A5.25", - "MITRE ATT&CK T1210", - "NERC Critical Infrastructure Protection CIP-007-6, Requirement R4 Part 4.4", - "NIST CSF V1.1 DE.AE-2", - "NIST SP 800-171 Rev. 2 3.3.3", - "NIST SP 800-171 Rev. 2 3.3.5", - "NIST SP 800-53 Rev. 4 AU-6", - "NIST SP 800-53 Rev. 4 CA-7", - "NIST SP 800-53 Rev. 4 IR-4", - "NIST SP 800-53 Rev. 4 SI-4", - "NIST SP 800-53 Rev. 5 AU-6", - "NIST SP 800-53 Rev. 5 AU-6(1)", - "NZISM V3.5 16.6.14. Event log auditing (CID:2034)", - "PCI-DSS V4.0 10.4.1", - "PCI-DSS V4.0 10.4.1.1", - "PCI-DSS V4.0 10.4.2", - "PCI-DSS V4.0 10.4.3", - "UK NCSC Cyber Assessment Framework V3.1 C1.c" - ], - "control": "Azure.DefenderForCloud.3", - "standards": [ - "AICPA TSC", - "CIS Critical Security Controls V8", - "CMMC 2.0", - "CSA Cloud Controls Matrix V4.0", - "Equifax SCF V1.0", - "FBI CJIS Security Policy V5.9", - "HIPAA Security Rule 45 CFR Part 164 Subpart C", - "ISO 27001:2013", - "ISO 27001:2022", - "MITRE ATT&CK", - "NERC Critical Infrastructure Protection", - "NIST CSF V1.1", - "NIST SP 800-171 Rev. 2", - "NIST SP 800-53 Rev. 4", - "NIST SP 800-53 Rev. 
5", - "NZISM V3.5", - "PCI-DSS V4.0", - "UK NCSC Cyber Assessment Framework V3.1" - ], - "status": "Fail", - "status_id": 3 - }, - "finding_info": { - "created_time": 1709090374, - "desc": "Microsoft Defender for Databases plan is not enabled in Subscription 0000aaa-1234-bbb-dddd-example123 because at least one of the four plans is on free tier. Defender for Databases in Microsoft Defender for Cloud allows you to protect your entire database estate with attack detection and threat response for the most popular database types in Azure. Defender for Cloud provides protection for the database engines and for data types, according to their attack surface and security risks: Defender for Azure SQL, SQL Server Machines, Open Source Relational DBs, and Azure Cosmos DBs. Refer to the remediation instructions if this configuration is not intended.", - "first_seen_time": 1709090374, - "modified_time": 1709090374, - "product_uid": "arn:aws:securityhub:us-gov-east-1:123456789012:product/123456789012/default", - "title": "[Azure.DefenderForCloud.3] Microsoft Defender for Databases plan should be enabled on your subscription", - "types": [ - "Software and Configuration Checks" - ], - "uid": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/Databases/azure-defender-for-cloud-databases-plan-enabled-check" - }, - "remediation": { - "desc": "For more information on the Defender for Databases plan and deployments refer to the Protect your databases with Defender for Databases section of the Azure Security Microsoft Defender for Cloud documentation.", - "references": [ - "https://learn.microsoft.com/en-us/azure/defender-for-cloud/tutorial-enable-databases-plan" - ] - }, - "resource": { - "data": [ - { - "id": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/SqlServers", - "name": "SqlServers", - "type": "Microsoft.Security/pricings", - "pricing_tier": "Free", - "free_trial_remaining_time": "P30D" - }, - { - "id": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/SqlServerVirtualMachines", - "name": "SqlServerVirtualMachines", - "type": "Microsoft.Security/pricings", - "pricing_tier": "Free", - "free_trial_remaining_time": "P30D" - }, - { - "id": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/OpenSourceRelationalDatabases", - "name": "OpenSourceRelationalDatabases", - "type": "Microsoft.Security/pricings", - "pricing_tier": "Free", - "free_trial_remaining_time": "P30D" - }, - { - "id": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/CosmosDbs", - "name": "CosmosDbs", - "type": "Microsoft.Security/pricings", - "pricing_tier": "Free", - "free_trial_remaining_time": "P30D" - } - ], - "cloud_partition": null, - "region": "azure-global", - "type": "Microsoft Defender for Cloud", - "uid": "/subscriptions/0000aaa-1234-bbb-dddd-example123/providers/Microsoft.Security/pricings/Databases" - }, - "unmapped": { - "provider_type": "CSP", - "asset_class": "Security Services", - "asset_component": "Microsoft Defender for Databases", - "workflow_status": "NEW", - "record_state": "ACTIVE" - } -} -``` - ## HTML Output The HTML Output selection writes a selective cross-section of ElectricEye findings to an HTML file for viewing in a browser and conversion to PDF and other reporting medium. 
An "executive report" format is created and forward text is dynamically generated to provide high-level statistics such as a failing percentage, passing and failing checks, number of unique resources, total resources, a breakdown of severity, and the amount of Accounts, Regions, `AssetService`, and `AssetComponents` scanned.
@@ -883,6 +702,14 @@ To use this Output include the following arguments in your ElectricEye CLI: `pyt
 }
 ```
 
+## Open Cyber Security Format (OCSF) V1.4.0 Output
+
+The OCSF V1.4.0 Output selection will convert all ElectricEye findings into the OCSF format (in JSON) which is a normalized and standardized security-centric data model, well-suited to ingestion in Data Lakes and Data Lake Houses built upon Amazon Security Lake, AWS Glue Data Catalog, Snowflake, Apache Iceberg, Google BigQuery, and more. The Event Class used for this finding is [`compliance_finding [2003]`](https://schema.ocsf.io/1.4.0/classes/compliance_finding?extensions=)
+
+This Output will provide the `ProductFields.AssetDetails` information; it is mapped within `resources.[].data`.
+
+To use this Output include the following arguments in your ElectricEye CLI: `python3 eeauditor/controller.py {..args..} -o ocsf_v1_4_0`
+
 ## MongoDB & AWS DocumentDB Output
 
 The MongoDB Output selection will write all ElectricEye findings to a MongoDB database or to an AWS DocumentDB Instance/Cluster along with the `ProductFields.AssetDetails` using `pymongo`. To facilitate mutable records being written to a Collection, ElectricEye will duplicate the ASFF `Id` (the finding's GUID) into the MongoDB `_id` field and write all records sequentially using the `update_one(upsert=True)` method within `pymongo`. This is written with a filter to replace the entire record where an existing `_id` is located.
@@ -1217,22 +1044,6 @@ Note that the TOML configurations are exactly the same as the normal [PostgreSQL
 )
 ```
 
-## Firemon Cloud Defense (DisruptOps) Output
-
-The Firemon Cloud Defense Output selection will all ElectricEye findings to a Cloud Defense (DisruptOps) endpoint using `requests`, the `AssetDetails` will be stripped off. A Pro license for Cloud Defense is required for API Access, best effort is made to respect throttling limitations and `4xx` HTTP Codes. ElectricEye will sleep on `429` and will raise an `Exception` on other `4xx` codes.
-
-This Output *will not* provide the `ProductFields.AssetDetails` information.
-
-To use this Output include the following arguments in your ElectricEye CLI: `python3 eeauditor/controller.py {..args..} -o postgresql`
-
-#### NOTE! This Output used to be `-o dops` which has been replaced fully with `-o firemon_cloud_defense`
-
-Additionally, values within the `[outputs.postgresql]` section of the TOML file *must be provided* for this integration to work.
-
-- **`firemon_cloud_defense_client_id_value`**: This variable should be set to the Client ID for your FireMon Cloud Defense tenant. This ID is used to authenticate with the FireMon Cloud Defense API. The location where these credentials are stored should match the value of the `global.credentials_location` variable, which specifies the location of the credentials for all integrations.
-
-- **`firemon_cloud_defense_api_key_value`**: This variable should be set to the API Key for your FireMon Cloud Defense tenant. This key is used to authenticate with the FireMon Cloud Defense API. The location where these credentials are stored should match the value of the `global.credentials_location` variable, which specifies the location of the credentials for all integrations.
-
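Tying the new OCSF V1.4.0 section above to the new `.gitignore` entries, a minimal file-output sketch follows; the output file name is inferred from the `.gitignore` addition (`output_ocsf_v1-4-0_events.json`) and `jq` is assumed to be installed.

```bash
# Write OCSF V1.4.0 events to disk, then inspect the compliance block of the first event
python3 eeauditor/controller.py -t AWS -o ocsf_v1_4_0
jq '.[0].compliance' output_ocsf_v1-4-0_events.json
```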
 ## Amazon Simple Queue Service (SQS) Output
 
 **IMPORTANT NOTE**: This requires `sqs:SendMessage` IAM permissions!
@@ -1334,7 +1145,7 @@ An example of the "Findings" output.
 
 ![SlackFindings](../../screenshots/outputs/slack_findings_output.jpg)
 
-## Open Cybersecurity Format (OCSF) -> Amazon Kinesis Data Firehose
+## Open Cybersecurity Format (OCSF) -> Amazon Data Firehose
 
 **IMPORTANT NOTE**: This requires `firehose:PutRecordBatch` IAM permissions!
 
diff --git a/docs/setup/Setup_AWS.md b/docs/setup/Setup_AWS.md
index 51ab43eb..60c705ed 100644
--- a/docs/setup/Setup_AWS.md
+++ b/docs/setup/Setup_AWS.md
@@ -36,6 +36,8 @@ The easiest way to set up this Role and permissions is either creating a StackSe
 
 ## Configuring TOML
 
+> **EXPERIMENTAL**: Using the arguments `-ut False` and `--args` you can provide an escaped JSON object containing the below values instead of using the TOML. For example: `python .\eeauditor\controller.py -ut False --args '{\"credentials_location\": \"CONFIG_FILE\",\"aws_multi_account_target_type\": \"Accounts\",\"aws_account_targets\": [],\"aws_regions_selection\": [],\"aws_electric_eye_iam_role_name\": \"\"}'` will evaluate your current Region and Account for AWS. A Linux-shell form of this invocation is sketched after the examples below.
+
 This section explains how to configure ElectricEye using a TOML configuration file. The configuration file contains settings for credentials, regions, accounts, and global settings and is located [here](../../eeauditor/external_providers.toml).
 
 To configure the TOML file, you need to modify the values of the variables in the `[global]` and `[regions_and_accounts.aws]` sections of the file. Here's an overview of the key variables you need to configure:
@@ -112,25 +114,25 @@
 pip3 install --user -r requirements.txt
 
 5. Use the Controller to conduct different kinds of Assessments.
 
-    - 5A. Retrieve all options for the Controller.
+- 5A. Retrieve all options for the Controller.
 
    ```bash
    python3 eeauditor/controller.py --help
    ```
 
-    - 5B. Evaluate your entire AWS environment.
+- 5B. Evaluate your entire AWS environment.
 
    ```bash
    python3 eeauditor/controller.py -t AWS
    ```
 
-    - 5C. Evaluate your AWS environment against a specifc Auditor (runs all Checks within the Auditor).
+- 5C. Evaluate your AWS environment against a specific Auditor (runs all Checks within the Auditor).
 
    ```bash
    python3 eeauditor/controller.py -t AWS -a AWS_IAM_Auditor
    ```
 
-    - 5D. Evaluate your AWS environment against a specific Check within any Auditor, it is ***not required*** to specify the Auditor name as well. The below examples runs the `[Athena.1] Athena workgroups should be configured to enforce query result encryption` check.
+- 5D. Evaluate your AWS environment against a specific Check within any Auditor, it is ***not required*** to specify the Auditor name as well. The below example runs the `[Athena.1] Athena workgroups should be configured to enforce query result encryption` check.
 
    ```bash
    python3 eeauditor/controller.py -t AWS -c athena_workgroup_encryption_check
    ```
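As a companion to the **EXPERIMENTAL** note above, the same escaped-JSON invocation written for a Linux shell might look like the sketch below; single quotes remove the need for the Windows-style backslash escaping, and the empty account/region lists evaluate the current Account and Region.

```bash
# Hypothetical Linux-shell form of the experimental --args invocation
python3 eeauditor/controller.py -t AWS -ut False --args '{"credentials_location": "CONFIG_FILE", "aws_multi_account_target_type": "Accounts", "aws_account_targets": [], "aws_regions_selection": [], "aws_electric_eye_iam_role_name": ""}'
```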
diff --git a/docs/setup/Setup_OCI.md b/docs/setup/Setup_OCI.md
index 2c7f688d..b091f491 100644
--- a/docs/setup/Setup_OCI.md
+++ b/docs/setup/Setup_OCI.md
@@ -256,7 +256,7 @@ python3 eeauditor/controller.py -t AWS -a ElectricEye_AttackSurface_OCI_Auditor
 
 ## Oracle Cloud Infrastructure Checks & Services
 
-These are the following services and checks perform by each Auditor, there are currently **221 Checks** across **18 Auditors** that support the secure configuration of **25 services/components**
+These are the following services and checks performed by each Auditor; there are currently **221 Checks** across **18 Auditors** that support the secure configuration of **25 services/components**
 
 | Auditor File Name | Scanned Resource Name | Auditor Scan Description |
 |---|---|---|
diff --git a/docs/setup/Setup_Snowflake.md b/docs/setup/Setup_Snowflake.md
index 8aec0e8b..381af26d 100644
--- a/docs/setup/Setup_Snowflake.md
+++ b/docs/setup/Setup_Snowflake.md
@@ -2,4 +2,164 @@
 
 This documentation is dedicated to using ElectricEye for evaluation of Snowflake enterprise data warehouses using SSPM capabilities.
 
-*Coming Soon!*
\ No newline at end of file
+## Table of Contents
+
+- [Setting up Snowflake Permissions](#setting-up-snowflake-permissions)
+- [Configuring TOML](#configuring-toml)
+- [Use ElectricEye for Snowflake](#use-electriceye-for-snowflake)
+- [Snowflake Checks & Services](#snowflake-checks--services)
+
+## Setting up Snowflake Permissions
+
+Snowflake's principal identity construct is a User - these can represent regular Users, those created using Single Sign-On (SSO) and SCIM, and can also represent 'service accounts' meant for machine-to-machine connectivity.
+
+ElectricEye uses Password-based authentication with a 'service account'; in the future, RSA private key authentication may be considered.
+
+ElectricEye only queries data in the `SNOWFLAKE` Database and within the `ACCOUNT_USAGE` schema. The following steps will guide you through creating a Custom role, providing `GRANTS` to the required Database and Schema, and creating a new user.
+
+**NOTE** - The following steps should be performed using an `ACCOUNTADMIN` or a similarly permissioned User + Role combo that is allowed to create users, create roles, and manage grants.
+
+1. From the Snowflake console navigate to **Admin** -> **Users & Roles** -> select the **Roles** tab at the top of the window -> select the **+ Role** option at the top-right of the window as shown below.
+
+![Step 1](../../screenshots/setup/snowflake/step1.JPG)
+
+2. Enter a **Name** (like `EE_AUDITOR`) and **Comment** while ignoring the **Grant to role** option and select **Create Role** as shown below.
+
+![Step 2](../../screenshots/setup/snowflake/step2.JPG)
+
+3. Navigate to **Projects** -> **Worksheets** -> and create a new **SQL Worksheet** from the creation toggle at the top right of the screen as shown below.
+
+![Step 3](../../screenshots/setup/snowflake/step3.JPG)
+
+4. Run each of the following SQL commands sequentially within the Worksheet. Do note that the `GRANT IMPORTED PRIVILEGES` grant allows your custom role access to the entire `SNOWFLAKE` database and should be done with care. Ensure you change the name of your Role -- `EE_AUDITOR` is used in this case -- if you used a different name for your role. Likewise, change the name of the Warehouse -- `COMPUTE_WH` is used in this case -- if you have a different warehouse.
+
+```sql
+use role ACCOUNTADMIN;
+GRANT IMPORTED PRIVILEGES ON DATABASE SNOWFLAKE TO ROLE EE_AUDITOR;
+GRANT SELECT ON all tables IN SCHEMA SNOWFLAKE.ACCOUNT_USAGE TO ROLE EE_AUDITOR;
+GRANT USAGE ON WAREHOUSE COMPUTE_WH TO ROLE EE_AUDITOR;
+```
+
+5. Navigate back to **Admin** -> **Users & Roles** -> select the **Users** tab at the top of the window -> select the **+ User** option at the top-right of the window as shown below.
+
+![Step 5](../../screenshots/setup/snowflake/step5.JPG)
+
+6. Provide a **User Name**, **Password**, and an optional **Comment**. As this is a "service account" deselect the option to **Force user to change password on first time login**. Under `Advanced User Options`, assign your custom role as the **Default Role**, select a **Default Warehouse**, and select **Create User** as shown below.
+
+![Step 6](../../screenshots/setup/snowflake/step6.JPG)
+
+Now that you have set up your Role, Grants, and new "service account" User - you can proceed to the next step to configure the TOML.
+
+## Configuring TOML
+
+> **EXPERIMENTAL**: Using the arguments `-ut False` and `--args` you can provide an escaped JSON object containing the below values instead of using the TOML. For example: `python .\eeauditor\controller.py -t Snowflake -ut False --args '{\"credentials_location\": \"CONFIG_FILE\",\"snowflake_username\": \"EXAMPLE\",\"snowflake_password_value\" : \"EXAMPLE\",\"snowflake_account_id\": \"EXAMPLE\",\"snowflake_warehouse_name\": \"EXAMPLE\",\"snowflake_region\": \"EXAMPLE\",\"snowflake_service_account_usernames\": [\"EXAMPLE\", \"EXAMPLE\"]}'`
+
+This section explains how to configure ElectricEye using a TOML configuration file. The configuration file contains settings for credentials, regions, accounts, and global settings and is located [here](../../eeauditor/external_providers.toml).
+
+To configure the TOML file, you need to modify the values of the variables in the `[global]` and `[credentials.snowflake]` sections of the file. Here's an overview of the key variables you need to configure:
+
+- `credentials_location`: Set this variable to specify the location of where credentials are stored and will be retrieved from. You can choose from AWS Systems Manager Parameter Store (`AWS_SSM`), AWS Secrets Manager (`AWS_SECRETS_MANAGER`), or from the TOML file itself (`CONFIG_FILE`) which is **NOT** recommended.
+
+**NOTE** When retrieving from SSM or Secrets Manager, your current Profile / Boto3 Session is used and *NOT* the ElectricEye Role that is specified in `aws_electric_eye_iam_role_name`. Ensure you have `ssm:GetParameter`, `secretsmanager:GetSecretValue`, and relevant `kms` permissions as needed to retrieve your stored secrets.
+
+- `snowflake_username`: Username for your Snowflake Account, this should be a user with the ability to read all tables and views in the default schemas.
+
+- `snowflake_password_value`: The location (or actual contents) of the Password for the User specified in `snowflake_username`; this location must match the value of `global.credentials_location` e.g., if you specify "AWS_SSM" then the value for this variable should be the name of the AWS Systems Manager Parameter Store SecureString Parameter.
+
+> It's important to note that this setting is a sensitive credential, and as such, its value should be stored in a secure manner that matches the location specified in the `[global]` section's `credentials_location` setting. For example, if `credentials_location` is set to `"AWS_SSM"`, then `snowflake_password_value` should be the name of an AWS Systems Manager Parameter Store SecureString parameter that contains the password for the User specified in `snowflake_username`.
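As an illustration of the `AWS_SSM` option described in the note above, the service account password could be seeded with the AWS CLI as sketched below; the parameter name is an arbitrary example and would then become the value of `snowflake_password_value` in the TOML.

```bash
# Store the Snowflake service account password as a SecureString parameter
# ("electriceye-snowflake-password" is an example name, not a required one)
aws ssm put-parameter \
  --name electriceye-snowflake-password \
  --type SecureString \
  --value 'YOUR_SNOWFLAKE_PASSWORD'
```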
+
+- `snowflake_account_id`: The Account ID for your Snowflake Account, this is found in the URL when you login to your Snowflake Account, e.g., VULEDAR-MR69420.
+
+- `snowflake_warehouse_name`: The name of the warehouse you use for querying data in Snowflake, this should be a warehouse that has the ability to run queries.
+
+- `snowflake_region`: The Region of your Snowflake Account, this is found in the URL when you login to your Snowflake Account, e.g., us-east-1
+
+- `snowflake_service_account_usernames`: The Usernames of "Service Accounts" created in Snowflake, this will optionally exempt these Usernames from being audited against the following checks: **snowflake_password_assigned_user_has_mfa_check**, **snowflake_accountadmins_have_email_check**, **snowflake_admin_default_role_check**, **snowflake_logins_without_mfa_check**, and **snowflake_bypass_mfa_review_check**
+
+## Use ElectricEye for Snowflake
+
+1. With >=Python 3.9 installed, install and upgrade `pip3` and set up `virtualenv`.
+
+```bash
+sudo apt install -y python3-pip
+pip3 install --upgrade pip
+pip3 install virtualenv --user
+virtualenv .venv
+```
+
+2. This will create a virtualenv directory called `.venv` which needs to be activated.
+
+```bash
+#For macOS and Linux
+. .venv/bin/activate
+
+#For Windows
+.venv\scripts\activate
+```
+
+3. Clone the repo and install all dependencies.
+
+```bash
+git clone https://github.com/jonrau1/ElectricEye.git
+cd ElectricEye
+pip3 install -r requirements.txt
+
+# if using AWS CloudShell
+pip3 install --user -r requirements.txt
+```
+
+4. Use the Controller to conduct different kinds of Assessments. Ensure you use the `-tp` / `--toml-path` argument if you have a custom TOML configuration file.
+
+- 4A. Retrieve all options for the Controller.
+
+    ```bash
+    python3 eeauditor/controller.py --help
+    ```
+
+- 4B. Evaluate your entire Snowflake Account.
+
+    ```bash
+    python3 eeauditor/controller.py -t Snowflake
+    ```
+
+- 4C. Evaluate your Snowflake environment against a specific Auditor (runs all Checks within the Auditor).
+
+    ```bash
+    python3 eeauditor/controller.py -t Snowflake -a Snowflake_Account_Auditor
+    ```
+
+- 4D. Evaluate your Snowflake environment against a specific Check within any Auditor, it is ***not required*** to specify the Auditor name as well. The below example runs the "[Snowflake.Account.9] Snowflake Accounts should configure a password policy" check.
+
+    ```bash
+    python3 eeauditor/controller.py -t Snowflake -c snowflake_account_password_policy_check
+    ```
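To eyeball the findings from that single check, the default `ocsf_stdout` output can be piped through `jq`, mirroring the grep/jq pattern shown in the Outputs documentation; this sketch assumes `jq` is installed.

```bash
# Pretty-print the OCSF events emitted by a single Snowflake check
python3 eeauditor/controller.py -t Snowflake -c snowflake_account_password_policy_check | grep 'SchemaVersion' | jq . -r
```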
+
+## Snowflake Checks & Services
+
+These are the following services and checks performed by each Auditor; there are currently **21 Checks** across **2 Auditors** that support the secure configuration of **3 services/components**
+
+| Auditor File Name | Scanned Resource Name | Auditor Scan Description |
+|---|---|---|
+| Snowflake_Users_Auditor | Snowflake user | Password assigned users should use MFA |
+| Snowflake_Users_Auditor | Snowflake user | Service account users (without a password) should have an RSA Private Key |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake users who have not logged in within 90 days should be disabled |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake admin users should have an email |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake users should not have built-in admin roles as their default role |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake password users should be monitored for logging in without MFA |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake admin users should rotate their passwords yearly |
+| Snowflake_Users_Auditor | Snowflake user | Snowflake users allowed to bypass MFA should be reviewed |
+| Snowflake_Users_Auditor | Snowflake account | Snowflake accounts should have at least 2, but less than 10, admins |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake accounts should be configured to use SSO |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake accounts should be configured to use SCIM |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake accounts should have 15 minute or less session timeouts set for admins |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake custom roles should not use built-in admin roles |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake tasks shouldn't be owned by admins |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake tasks shouldn't run with admin privileges |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake stored procedures shouldn't be owned by admins |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake stored procedures shouldn't run with admin privileges |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake accounts should have a password policy |
+| Snowflake_Account_Auditor | Snowflake password policy | Snowflake password policies should require 14 character minimum length for passwords |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake users should be monitored for altering their session timeouts |
+| Snowflake_Account_Auditor | Snowflake account | Snowflake accounts should use a network policy |
+
+Continue to check this section for information on active, retired, and renamed checks or use the `--list-checks` command in the CLI!
\ No newline at end of file
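The closing pointer above can be exercised directly; per the README help text, `-lch` is the new short alias for `--list-checks`, and this sketch assumes a configured Snowflake TOML.

```bash
# Print the table of Snowflake Auditors, Checks, and Check descriptions
python3 eeauditor/controller.py -t Snowflake -lch
```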
diff --git a/eeauditor/auditors/aws/AWS_Accounts_Auditor.py b/eeauditor/auditors/aws/AWS_Accounts_Auditor.py
index bb1fa566..e11c1305 100644
--- a/eeauditor/auditors/aws/AWS_Accounts_Auditor.py
+++ b/eeauditor/auditors/aws/AWS_Accounts_Auditor.py
@@ -18,10 +18,14 @@
 #specific language governing permissions and limitations
 #under the License.
 
+import logging
 from check_register import CheckRegister
 import datetime
 import botocore
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("AwsAccountAuditor")
+
 registry = CheckRegister()
 
 def global_region_generator(awsPartition):
@@ -63,7 +67,8 @@ def get_account_alternate_contacts(cache, session):
             accountAlternateContacts.append("SECURITY")
         cache["get_account_alternate_contacts"] = accountAlternateContacts
         return cache["get_account_alternate_contacts"]
-    except botocore.exceptions.ClientError as error:
+    except botocore.exceptions.ClientError as err:
+        logger.warning("Could not get account alternate contacts: %s", err)
         return {}
 
 @registry.register_check("account")
diff --git a/eeauditor/auditors/aws/AWS_Glue_Auditor.py b/eeauditor/auditors/aws/AWS_Glue_Auditor.py
index 55eb2117..628cf8c9 100644
--- a/eeauditor/auditors/aws/AWS_Glue_Auditor.py
+++ b/eeauditor/auditors/aws/AWS_Glue_Auditor.py
@@ -18,42 +18,71 @@
 #specific language governing permissions and limitations
 #under the License.
 
+import logging
 import datetime
 from check_register import CheckRegister
+from botocore.exceptions import ClientError
 import base64
 import json
 
+logging.getLogger().setLevel(logging.INFO)
+logger = logging.getLogger("AwsGlueAuditor")
+
 registry = CheckRegister()
 
 def list_crawlers(cache, session):
     glue = session.client("glue")
+
     response = cache.get("list_crawlers")
+
     if response:
         return response
+
     cache["list_crawlers"] = glue.list_crawlers()
     return cache["list_crawlers"]
 
+def get_data_catalog_encryption_settings(cache, session):
+    glue = session.client("glue")
+
+    response = cache.get("get_data_catalog_encryption_settings")
+
+    if response:
+        return response
+
+    cache["get_data_catalog_encryption_settings"] = glue.get_data_catalog_encryption_settings()
+    return cache["get_data_catalog_encryption_settings"]
+
 @registry.register_check("glue")
 def crawler_s3_encryption_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
     """[Glue.1] AWS Glue crawler security configurations should enable Amazon S3 encryption"""
     glue = session.client("glue")
-    crawler = list_crawlers(cache, session)
-    myCrawlers = crawler["CrawlerNames"]
-    for crawlers in myCrawlers:
-        crawlerName = str(crawlers)
+    crawlers = list_crawlers(cache, session)["CrawlerNames"]
+    # ISO Time
+    iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
+
+    for crawler in crawlers:
+        crawlerName = crawler
         crawlerArn = f"arn:{awsPartition}:glue:{awsRegion}:{awsAccountId}:crawler/{crawlerName}"
-        # ISO Time
-        iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
-        response = glue.get_crawler(Name=crawlerName)
+        response = glue.get_crawler(Name=crawler)
+        # B64 encode all of the details for the Asset
         assetJson = json.dumps(response,default=str).encode("utf-8")
         assetB64 = base64.b64encode(assetJson)
+
+        crawlerS3Encryption = True
         try:
             sec = glue.get_security_configuration(Name=response["Crawler"]["CrawlerSecurityConfiguration"])
-            s3EncryptionCheck = str(sec["SecurityConfiguration"]["EncryptionConfiguration"]["S3Encryption"][0]["S3EncryptionMode"])
-        except KeyError:
-            s3EncryptionCheck = "DISABLED"
-        if s3EncryptionCheck == "DISABLED":
+            if sec["SecurityConfiguration"]["EncryptionConfiguration"]["S3Encryption"][0]["S3EncryptionMode"] == "DISABLED":
+                crawlerS3Encryption = False
+        except ClientError as ce:
+            crawlerS3Encryption = False
+            logger.warning("Failed to get security configuration for crawler %s: %s", crawler, ce)
+        except KeyError as ke:
+            crawlerS3Encryption = False
+            
logger.warning("Failed to get security configuration for crawler %s: %s", crawler, ke) + + + # this is a failing check + if crawlerS3Encryption is False: finding = { "SchemaVersion": "2018-10-08", "Id": crawlerArn + "/glue-crawler-s3-encryption-check", @@ -67,7 +96,7 @@ def crawler_s3_encryption_check(cache: dict, session, awsAccountId: str, awsRegi "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, - "Severity": {"Label": "HIGH"}, + "Severity": {"Label": "MEDIUM"}, "Confidence": 99, "Title": "[Glue.1] AWS Glue crawler security configurations should enable Amazon S3 encryption", "Description": "AWS Glue crawler " @@ -484,7 +513,7 @@ def crawler_job_bookmark_encryption_check(cache: dict, session, awsAccountId: st "NIST SP 800-53 Rev. 4 SC-12", "NIST SP 800-53 Rev. 4 SC-28", "AICPA TSC CC6.1", - "ISO 27001:2013 A.8.2.3", + "ISO 27001:2013 A.8.2.3" ], }, "Workflow": {"Status": "RESOLVED"}, @@ -495,286 +524,280 @@ def crawler_job_bookmark_encryption_check(cache: dict, session, awsAccountId: st @registry.register_check("glue") def glue_data_catalog_encryption_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[Glue.4] AWS Glue data catalogs should be encrypted at rest""" - glue = session.client("glue") + response = get_data_catalog_encryption_settings(cache, session) catalogArn = f"arn:{awsPartition}:glue:{awsRegion}:{awsAccountId}:catalog" # ISO Time iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() + # B64 encode all of the details for the Asset + assetJson = json.dumps(response,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + catalogEncrypted = True try: - response = glue.get_data_catalog_encryption_settings() - # B64 encode all of the details for the Asset - assetJson = json.dumps(response,default=str).encode("utf-8") - assetB64 = base64.b64encode(assetJson) - try: - catalogEncryptionCheck = str(response["DataCatalogEncryptionSettings"]["EncryptionAtRest"]["CatalogEncryptionMode"]) - except KeyError: - catalogEncryptionCheck = "DISABLED" - if catalogEncryptionCheck == "DISABLED": - finding = { - "SchemaVersion": "2018-10-08", - "Id": catalogArn + "/glue-data-catalog-encryption-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": catalogArn, - "AwsAccountId": awsAccountId, - "Types": [ - "Software and Configuration Checks/AWS Security Best Practices", - "Effects/Data Exposure", - ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "HIGH"}, - "Confidence": 99, - "Title": "[Glue.4] AWS Glue data catalogs should be encrypted at rest", - "Description": "The AWS Glue data catalog for account " - + awsAccountId - + " is not encrypted. You can enable or disable encryption settings for the entire Data Catalog. In the process, you specify an AWS KMS key that is automatically used when objects, such as tables, databases, partitions, table versions, connections and/or user-defined functions, are written to the Data Catalog. 
Refer to the remediation instructions if this configuration is not intended",
-                "Remediation": {
-                    "Recommendation": {
-                        "Text": "For more information on data catalog encryption refer to the Encrypting Your Data Catalog section of the AWS Glue Developer Guide",
-                        "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-glue-data-catalog.html",
-                    }
-                },
-                "ProductFields": {
-                    "ProductName": "ElectricEye",
-                    "Provider": "AWS",
-                    "ProviderType": "CSP",
-                    "ProviderAccountId": awsAccountId,
-                    "AssetRegion": awsRegion,
-                    "AssetDetails": assetB64,
-                    "AssetClass": "Analytics",
-                    "AssetService": "AWS Glue",
-                    "AssetComponent": "Data Catalog"
-                },
-                "Resources": [
-                    {
-                        "Type": "AwsGlueDataCatalog",
-                        "Id": catalogArn,
-                        "Partition": awsPartition,
-                        "Region": awsRegion,
-                    }
-                ],
-                "Compliance": {
-                    "Status": "FAILED",
-                    "RelatedRequirements": [
-                        "NIST CSF V1.1 PR.DS-1",
-                        "NIST SP 800-53 Rev. 4 MP-8",
-                        "NIST SP 800-53 Rev. 4 SC-12",
-                        "NIST SP 800-53 Rev. 4 SC-28",
-                        "AICPA TSC CC6.1",
-                        "ISO 27001:2013 A.8.2.3",
-                    ],
-                },
-                "Workflow": {"Status": "NEW"},
-                "RecordState": "ACTIVE",
-            }
-            yield finding
-        else:
-            finding = {
-                "SchemaVersion": "2018-10-08",
-                "Id": catalogArn + "/glue-data-catalog-encryption-check",
-                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
-                "GeneratorId": catalogArn,
-                "AwsAccountId": awsAccountId,
-                "Types": [
-                    "Software and Configuration Checks/AWS Security Best Practices",
-                    "Effects/Data Exposure",
+        if response["DataCatalogEncryptionSettings"]["EncryptionAtRest"]["CatalogEncryptionMode"] == "DISABLED":
+            catalogEncrypted = False
+    except KeyError:
+        catalogEncrypted = False
+
+    # this is a failing check
+    if catalogEncrypted is False:
+        finding = {
+            "SchemaVersion": "2018-10-08",
+            "Id": catalogArn + "/glue-data-catalog-encryption-check",
+            "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
+            "GeneratorId": catalogArn,
+            "AwsAccountId": awsAccountId,
+            "Types": [
+                "Software and Configuration Checks/AWS Security Best Practices",
+                "Effects/Data Exposure",
+            ],
+            "FirstObservedAt": iso8601Time,
+            "CreatedAt": iso8601Time,
+            "UpdatedAt": iso8601Time,
+            "Severity": {"Label": "MEDIUM"},
+            "Confidence": 99,
+            "Title": "[Glue.4] AWS Glue data catalogs should be encrypted at rest",
+            "Description": "The AWS Glue data catalog for account "
+            + awsAccountId
+            + " is not encrypted. You can enable or disable encryption settings for the entire Data Catalog. In the process, you specify an AWS KMS key that is automatically used when objects, such as tables, databases, partitions, table versions, connections and/or user-defined functions, are written to the Data Catalog. 
Refer to the remediation instructions if this configuration is not intended", + "Remediation": { + "Recommendation": { + "Text": "For more information on data catalog encryption refer to the Encrypting Your Data Catalog section of the AWS Glue Developer Guide", + "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-glue-data-catalog.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Analytics", + "AssetService": "AWS Glue", + "AssetComponent": "Data Catalog" + }, + "Resources": [ + { + "Type": "AwsGlueDataCatalog", + "Id": catalogArn, + "Partition": awsPartition, + "Region": awsRegion, + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.DS-1", + "NIST SP 800-53 Rev. 4 MP-8", + "NIST SP 800-53 Rev. 4 SC-12", + "NIST SP 800-53 Rev. 4 SC-28", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.8.2.3", ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "INFORMATIONAL"}, - "Confidence": 99, - "Title": "[Glue.4] AWS Glue data catalogs should be encrypted at rest", - "Description": "The AWS Glue data catalog for account " - + awsAccountId - + " is encrypted.", - "Remediation": { - "Recommendation": { - "Text": "For more information on data catalog encryption refer to the Encrypting Your Data Catalog section of the AWS Glue Developer Guide", - "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-glue-data-catalog.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Analytics", - "AssetService": "AWS Glue", - "AssetComponent": "Data Catalog" - }, - "Resources": [ - { - "Type": "AwsGlueDataCatalog", - "Id": catalogArn, - "Partition": awsPartition, - "Region": awsRegion, - } + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE", + } + yield finding + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": catalogArn + "/glue-data-catalog-encryption-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": catalogArn, + "AwsAccountId": awsAccountId, + "Types": [ + "Software and Configuration Checks/AWS Security Best Practices", + "Effects/Data Exposure", + ], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Glue.4] AWS Glue data catalogs should be encrypted at rest", + "Description": "The AWS Glue data catalog for account " + + awsAccountId + + " is encrypted.", + "Remediation": { + "Recommendation": { + "Text": "For more information on data catalog encryption refer to the Encrypting Your Data Catalog section of the AWS Glue Developer Guide", + "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-glue-data-catalog.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Analytics", + "AssetService": "AWS Glue", + "AssetComponent": "Data Catalog" + }, + "Resources": [ + { + "Type": "AwsGlueDataCatalog", + "Id": catalogArn, + "Partition": awsPartition, + "Region": awsRegion, + } + 
], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.DS-1", + "NIST SP 800-53 Rev. 4 MP-8", + "NIST SP 800-53 Rev. 4 SC-12", + "NIST SP 800-53 Rev. 4 SC-28", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.8.2.3", ], - "Compliance": { - "Status": "PASSED", - "RelatedRequirements": [ - "NIST CSF V1.1 PR.DS-1", - "NIST SP 800-53 Rev. 4 MP-8", - "NIST SP 800-53 Rev. 4 SC-12", - "NIST SP 800-53 Rev. 4 SC-28", - "AICPA TSC CC6.1", - "ISO 27001:2013 A.8.2.3", - ], - }, - "Workflow": {"Status": "RESOLVED"}, - "RecordState": "ARCHIVED", - } - yield finding - except Exception as e: - if str(e) == '"CrawlerSecurityConfiguration"': - pass - else: - print(e) + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED", + } + yield finding @registry.register_check("glue") def glue_data_catalog_password_encryption_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[Glue.5] AWS Glue data catalogs should be configured to encrypt connection passwords""" - glue = session.client("glue") + response = get_data_catalog_encryption_settings(cache, session) catalogArn = f"arn:{awsPartition}:glue:{awsRegion}:{awsAccountId}:catalog" # ISO Time iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() + # B64 encode all of the details for the Asset + assetJson = json.dumps(response,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + passwordEncryptionCheck = True try: - response = glue.get_data_catalog_encryption_settings() - # B64 encode all of the details for the Asset - assetJson = json.dumps(response,default=str).encode("utf-8") - assetB64 = base64.b64encode(assetJson) - try: - passwordEncryptionCheck = str(response["DataCatalogEncryptionSettings"]["ConnectionPasswordEncryption"]["ReturnConnectionPasswordEncrypted"]) - except: - passwordEncryptionCheck = "False" - if passwordEncryptionCheck == "False": - finding = { - "SchemaVersion": "2018-10-08", - "Id": catalogArn + "/glue-data-catalog-password-encryption-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": catalogArn, - "AwsAccountId": awsAccountId, - "Types": [ - "Software and Configuration Checks/AWS Security Best Practices", - "Effects/Data Exposure", - ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "HIGH"}, - "Confidence": 99, - "Title": "[Glue.5] AWS Glue data catalogs should be configured to encrypt connection passwords", - "Description": "The AWS Glue data catalog for account " - + awsAccountId - + " is not configured to encrypt connection passwords. You can retrieve connection passwords in the AWS Glue Data Catalog by using the GetConnection and GetConnections API operations. These passwords are stored in the Data Catalog connection and are used when AWS Glue connects to a Java Database Connectivity (JDBC) data store. When the connection was created or updated, an option in the Data Catalog settings determined whether the password was encrypted. 
Refer to the remediation instructions if this configuration is not intended",
-                "Remediation": {
-                    "Recommendation": {
-                        "Text": "For more information on data catalog connection password encryption refer to the Encrypting Connection Passwords section of the AWS Glue Developer Guide",
-                        "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-connection-passwords.html",
-                    }
-                },
-                "ProductFields": {
-                    "ProductName": "ElectricEye",
-                    "Provider": "AWS",
-                    "ProviderType": "CSP",
-                    "ProviderAccountId": awsAccountId,
-                    "AssetRegion": awsRegion,
-                    "AssetDetails": assetB64,
-                    "AssetClass": "Analytics",
-                    "AssetService": "AWS Glue",
-                    "AssetComponent": "Data Catalog"
-                },
-                "Resources": [
-                    {
-                        "Type": "AwsGlueDataCatalog",
-                        "Id": catalogArn,
-                        "Partition": awsPartition,
-                        "Region": awsRegion,
-                    }
-                ],
-                "Compliance": {
-                    "Status": "FAILED",
-                    "RelatedRequirements": [
-                        "NIST CSF V1.1 PR.DS-1",
-                        "NIST SP 800-53 Rev. 4 MP-8",
-                        "NIST SP 800-53 Rev. 4 SC-12",
-                        "NIST SP 800-53 Rev. 4 SC-28",
-                        "AICPA TSC CC6.1",
-                        "ISO 27001:2013 A.8.2.3",
-                    ],
-                },
-                "Workflow": {"Status": "NEW"},
-                "RecordState": "ACTIVE",
-            }
-            yield finding
-        else:
-            finding = {
-                "SchemaVersion": "2018-10-08",
-                "Id": catalogArn + "/glue-data-catalog-password-encryption-check",
-                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
-                "GeneratorId": catalogArn,
-                "AwsAccountId": awsAccountId,
-                "Types": [
-                    "Software and Configuration Checks/AWS Security Best Practices",
-                    "Effects/Data Exposure",
+        if response["DataCatalogEncryptionSettings"]["ConnectionPasswordEncryption"]["ReturnConnectionPasswordEncrypted"] is False:
+            passwordEncryptionCheck = False
+    except KeyError:
+        passwordEncryptionCheck = False
+
+    # this is a failing check
+    if passwordEncryptionCheck is False:
+        finding = {
+            "SchemaVersion": "2018-10-08",
+            "Id": catalogArn + "/glue-data-catalog-password-encryption-check",
+            "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
+            "GeneratorId": catalogArn,
+            "AwsAccountId": awsAccountId,
+            "Types": [
+                "Software and Configuration Checks/AWS Security Best Practices",
+                "Effects/Data Exposure",
+            ],
+            "FirstObservedAt": iso8601Time,
+            "CreatedAt": iso8601Time,
+            "UpdatedAt": iso8601Time,
+            "Severity": {"Label": "LOW"},
+            "Confidence": 99,
+            "Title": "[Glue.5] AWS Glue data catalogs should be configured to encrypt connection passwords",
+            "Description": "The AWS Glue data catalog for account "
+            + awsAccountId
+            + " is not configured to encrypt connection passwords. You can retrieve connection passwords in the AWS Glue Data Catalog by using the GetConnection and GetConnections API operations. These passwords are stored in the Data Catalog connection and are used when AWS Glue connects to a Java Database Connectivity (JDBC) data store. When the connection was created or updated, an option in the Data Catalog settings determined whether the password was encrypted. 
Refer to the remediation instructions if this configuration is not intended", + "Remediation": { + "Recommendation": { + "Text": "For more information on data catalog connection password encryption refer to the Encrypting Connection Passwords section of the AWS Glue Developer Guide", + "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-connection-passwords.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Analytics", + "AssetService": "AWS Glue", + "AssetComponent": "Data Catalog" + }, + "Resources": [ + { + "Type": "AwsGlueDataCatalog", + "Id": catalogArn, + "Partition": awsPartition, + "Region": awsRegion, + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.DS-1", + "NIST SP 800-53 Rev. 4 MP-8", + "NIST SP 800-53 Rev. 4 SC-12", + "NIST SP 800-53 Rev. 4 SC-28", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.8.2.3", ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "INFORMATIONAL"}, - "Confidence": 99, - "Title": "[Glue.5] AWS Glue data catalogs should be configured to encrypt connection passwords", - "Description": "The AWS Glue data catalog for account " - + awsAccountId - + " is configured to encrypt connection passwords.", - "Remediation": { - "Recommendation": { - "Text": "For more information on data catalog connection password encryption refer to the Encrypting Connection Passwords section of the AWS Glue Developer Guide", - "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-connection-passwords.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Analytics", - "AssetService": "AWS Glue", - "AssetComponent": "Data Catalog" - }, - "Resources": [ - { - "Type": "AwsGlueDataCatalog", - "Id": catalogArn, - "Partition": awsPartition, - "Region": awsRegion, - } + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE", + } + yield finding + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": catalogArn + "/glue-data-catalog-password-encryption-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": catalogArn, + "AwsAccountId": awsAccountId, + "Types": [ + "Software and Configuration Checks/AWS Security Best Practices", + "Effects/Data Exposure", + ], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Glue.5] AWS Glue data catalogs should be configured to encrypt connection passwords", + "Description": "The AWS Glue data catalog for account " + + awsAccountId + + " is configured to encrypt connection passwords.", + "Remediation": { + "Recommendation": { + "Text": "For more information on data catalog connection password encryption refer to the Encrypting Connection Passwords section of the AWS Glue Developer Guide", + "Url": "https://docs.aws.amazon.com/glue/latest/dg/encrypt-connection-passwords.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": 
"Analytics", + "AssetService": "AWS Glue", + "AssetComponent": "Data Catalog" + }, + "Resources": [ + { + "Type": "AwsGlueDataCatalog", + "Id": catalogArn, + "Partition": awsPartition, + "Region": awsRegion, + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.DS-1", + "NIST SP 800-53 Rev. 4 MP-8", + "NIST SP 800-53 Rev. 4 SC-12", + "NIST SP 800-53 Rev. 4 SC-28", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.8.2.3", ], - "Compliance": { - "Status": "PASSED", - "RelatedRequirements": [ - "NIST CSF V1.1 PR.DS-1", - "NIST SP 800-53 Rev. 4 MP-8", - "NIST SP 800-53 Rev. 4 SC-12", - "NIST SP 800-53 Rev. 4 SC-28", - "AICPA TSC CC6.1", - "ISO 27001:2013 A.8.2.3", - ], - }, - "Workflow": {"Status": "RESOLVED"}, - "RecordState": "ARCHIVED", - } - yield finding - except Exception as e: - if str(e) == '"CrawlerSecurityConfiguration"': - pass - else: - print(e) + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED", + } + yield finding @registry.register_check("glue") def glue_data_catalog_resource_policy_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: @@ -945,4 +968,6 @@ def glue_data_catalog_resource_policy_check(cache: dict, session, awsAccountId: } yield finding else: - print(e) \ No newline at end of file + print(e) + +# EOF \ No newline at end of file diff --git a/eeauditor/auditors/aws/Amazon_EC2_Auditor.py b/eeauditor/auditors/aws/Amazon_EC2_Auditor.py index ea3e5fb6..27d0d751 100644 --- a/eeauditor/auditors/aws/Amazon_EC2_Auditor.py +++ b/eeauditor/auditors/aws/Amazon_EC2_Auditor.py @@ -18,6 +18,7 @@ #specific language governing permissions and limitations #under the License. +import logging import tomli import os import sys @@ -30,6 +31,8 @@ import base64 import json +logger = logging.getLogger("AwsEc2Auditor") + SHODAN_HOSTS_URL = "https://api.shodan.io/shodan/host/" # Adding backoff and retries for SSM - this API gets throttled a lot @@ -175,7 +178,7 @@ def get_shodan_api_key(cache): credLocation = data["global"]["credentials_location"] shodanCredValue = data["global"]["shodan_api_key_value"] if credLocation not in validCredLocations: - print(f"Invalid option for [global.credLocation]. Must be one of {str(validCredLocations)}.") + logger.error("Invalid option for [global.credLocation]. 
Must be one of %s.", validCredLocations) sys.exit(2) if not shodanCredValue: apiKey = None @@ -197,8 +200,8 @@ def get_shodan_api_key(cache): Name=shodanCredValue, WithDecryption=True )["Parameter"]["Value"] - except ClientError as e: - print(f"Error retrieving API Key from SSM, skipping all Shodan checks, error: {e}") + except ClientError as err: + logger.warning("Error retrieving API Key from AWS Systems Manager Parameter Store, skipping all Shodan checks, error: %s", err) apiKey = None # Retrieve the credential from AWS Secrets Manager @@ -207,8 +210,8 @@ def get_shodan_api_key(cache): apiKey = asm.get_secret_value( SecretId=shodanCredValue, )["SecretString"] - except ClientError as e: - print(f"Error retrieving API Key from ASM, skipping all Shodan checks, error: {e}") + except ClientError as err: + logger.warning("Error retrieving API Key from AWS Secrets Manager, skipping all Shodan checks, error: %s", err) apiKey = None cache["get_shodan_api_key"] = apiKey diff --git a/eeauditor/auditors/aws/Amazon_ECR_Auditor.py b/eeauditor/auditors/aws/Amazon_ECR_Auditor.py index b4c645ef..14f73338 100644 --- a/eeauditor/auditors/aws/Amazon_ECR_Auditor.py +++ b/eeauditor/auditors/aws/Amazon_ECR_Auditor.py @@ -76,7 +76,7 @@ def ecr_repo_vuln_scan_check(cache: dict, session, awsAccountId: str, awsRegion: enhancedScan = False # If neither scanning is active, this is a failing check - if basicScan and enhancedScan == False: + if basicScan is False and enhancedScan is False: finding = { "SchemaVersion": "2018-10-08", "Id": repoArn + "/ecr-no-scan", @@ -215,17 +215,24 @@ def ecr_repo_vuln_scan_check(cache: dict, session, awsAccountId: str, awsRegion: def ecr_repo_image_lifecycle_policy_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECR.2] ECR repositories should be have an image lifecycle policy configured""" ecr = session.client("ecr") + # ISO Time + iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() for repo in describe_repositories(cache, session): # B64 encode all of the details for the Asset assetJson = json.dumps(repo,default=str).encode("utf-8") assetB64 = base64.b64encode(assetJson) repoArn = repo["repositoryArn"] repoName = repo["repositoryName"] - # ISO Time - iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() + + # Evaluate if a lifecycle policy is configured + lifecyclePolicy = True try: - # this is a passing finding ecr.get_lifecycle_policy(repositoryName=repoName) + except botocore.exceptions.ClientError: + lifecyclePolicy = False + + # this is a passing check + if lifecyclePolicy is True: finding = { "SchemaVersion": "2018-10-08", "Id": repoArn + "/ecr-lifecycle-policy-check", @@ -296,94 +303,101 @@ def ecr_repo_image_lifecycle_policy_check(cache: dict, session, awsAccountId: st "RecordState": "ARCHIVED", } yield finding - except botocore.exceptions.ClientError as error: - if error.response["Error"]["Code"] == "LifecyclePolicyNotFoundException": - finding = { - "SchemaVersion": "2018-10-08", - "Id": repoArn + "/ecr-lifecycle-policy-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": repoArn, - "AwsAccountId": awsAccountId, - "Types": ["Software and Configuration Checks/AWS Security Best Practices"], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "MEDIUM"}, - "Confidence": 99, - "Title": "[ECR.2] ECR repositories 
should be have an image lifecycle policy configured", - "Description": f"ECR repository {repoName} does not have an image lifecycle policy configured. Amazon ECR lifecycle policies provide more control over the lifecycle management of images in a private repository. A lifecycle policy contains one or more rules, where each rule defines an action for Amazon ECR. This provides a way to automate the cleaning up of your container images by expiring images based on age or count. You should expect that images become expired within 24 hours after they meet the expiration criteria per your lifecycle policy. When Amazon ECR performs an action based on a lifecycle policy, this is captured as an event in AWS CloudTrail. When considering the use of lifecycle policies, it's important to use the lifecycle policy preview to confirm which images the lifecycle policy expires before applying it to a repository. Using Lifecycle Policies can help to reduce security exposure by forcefully removing stale images and promoting good image hygeine by having processes to continually scan and rebuild container images. Refer to the remediation instructions if this configuration is not intended", - "Remediation": { - "Recommendation": { - "Text": "If your repository should be configured to have an image lifecycle policy refer to the Amazon ECR Lifecycle Policies section in the Amazon ECR User Guide", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Containers", - "AssetService": "Amazon Elastic Container Registry", - "AssetComponent": "Repository" - }, - "Resources": [ - { - "Type": "AwsEcrRepository", - "Id": repoArn, - "Partition": awsPartition, - "Region": awsRegion, - "Details": {"Other": {"RepositoryName": repoName}}, - } - ], - "Compliance": { - "Status": "FAILED", - "RelatedRequirements": [ - "NIST CSF V1.1 ID.BE-5", - "NIST CSF V1.1 PR.DS-4", - "NIST CSF V1.1 PR.PT-5", - "NIST SP 800-53 Rev. 4 AU-4", - "NIST SP 800-53 Rev. 4 CP-2", - "NIST SP 800-53 Rev. 4 CP-7", - "NIST SP 800-53 Rev. 4 CP-8", - "NIST SP 800-53 Rev. 4 CP-11", - "NIST SP 800-53 Rev. 4 CP-13", - "NIST SP 800-53 Rev. 4 PL-8", - "NIST SP 800-53 Rev. 4 SA-14", - "NIST SP 800-53 Rev. 4 SC-5", - "NIST SP 800-53 Rev. 4 SC-6", - "AICPA TSC CC3.1", - "AICPA TSC A1.1", - "AICPA TSC A1.2", - "ISO 27001:2013 A.11.1.4", - "ISO 27001:2013 A.12.3.1", - "ISO 27001:2013 A.17.1.1", - "ISO 27001:2013 A.17.1.2", - "ISO 27001:2013 A.17.2.1" - ] - }, - "Workflow": {"Status": "NEW"}, - "RecordState": "ACTIVE", - } - yield finding
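For context on the remediation the rewritten failing branch below points at: a lifecycle policy is a JSON rule document applied per repository. A minimal sketch with boto3, assuming an illustrative repository name and a hypothetical expire-untagged-after-14-days rule:

```python
import json

import boto3

ecr = boto3.client("ecr")

# Hypothetical rule: expire untagged images 14 days after they were pushed
lifecycle_policy = {
    "rules": [
        {
            "rulePriority": 1,
            "description": "Expire untagged images older than 14 days",
            "selection": {
                "tagStatus": "untagged",
                "countType": "sinceImagePushed",
                "countUnit": "days",
                "countNumber": 14
            },
            "action": {"type": "expire"}
        }
    ]
}

# Applies the JSON document to a single repository
ecr.put_lifecycle_policy(
    repositoryName="my-example-repo",
    lifecyclePolicyText=json.dumps(lifecycle_policy)
)
```

`start_lifecycle_policy_preview` accepts the same document, which is how the preview mentioned in the finding description can be run before a rule is applied for real.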
+ # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": repoArn + "/ecr-lifecycle-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": repoArn, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[ECR.2] ECR repositories should have an image lifecycle policy configured", + "Description": f"ECR repository {repoName} does not have an image lifecycle policy configured. Amazon ECR lifecycle policies provide more control over the lifecycle management of images in a private repository. A lifecycle policy contains one or more rules, where each rule defines an action for Amazon ECR. This provides a way to automate the cleaning up of your container images by expiring images based on age or count. You should expect that images become expired within 24 hours after they meet the expiration criteria per your lifecycle policy. When Amazon ECR performs an action based on a lifecycle policy, this is captured as an event in AWS CloudTrail. When considering the use of lifecycle policies, it's important to use the lifecycle policy preview to confirm which images the lifecycle policy expires before applying it to a repository. Using Lifecycle Policies can help to reduce security exposure by forcefully removing stale images and promoting good image hygiene by having processes to continually scan and rebuild container images. Refer to the remediation instructions if this configuration is not intended", + "Remediation": { + "Recommendation": { + "Text": "If your repository should be configured to have an image lifecycle policy refer to the Amazon ECR Lifecycle Policies section in the Amazon ECR User Guide", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Containers", + "AssetService": "Amazon Elastic Container Registry", + "AssetComponent": "Repository" + }, + "Resources": [ + { + "Type": "AwsEcrRepository", + "Id": repoArn, + "Partition": awsPartition, + "Region": awsRegion, + "Details": {"Other": {"RepositoryName": repoName}}, + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.BE-5", + "NIST CSF V1.1 PR.DS-4", + "NIST CSF V1.1 PR.PT-5", + "NIST SP 800-53 Rev. 4 AU-4", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 CP-7", + "NIST SP 800-53 Rev. 4 CP-8", + "NIST SP 800-53 Rev. 4 CP-11", + "NIST SP 800-53 Rev. 4 CP-13", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SA-14", + "NIST SP 800-53 Rev. 
4 SC-6", + "AICPA TSC CC3.1", + "AICPA TSC A1.1", + "AICPA TSC A1.2", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.12.3.1", + "ISO 27001:2013 A.17.1.1", + "ISO 27001:2013 A.17.1.2", + "ISO 27001:2013 A.17.2.1" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE", + } + yield finding @registry.register_check("ecr") def ecr_repo_permission_policy_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECR.3] ECR repositories should be have a repository policy configured""" ecr = session.client("ecr") + # ISO Time + iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() for repo in describe_repositories(cache, session): # B64 encode all of the details for the Asset assetJson = json.dumps(repo,default=str).encode("utf-8") assetB64 = base64.b64encode(assetJson) repoArn = repo["repositoryArn"] repoName = repo["repositoryName"] - # ISO Time - iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() + + # Evaluate if there is a repository permission policy configured + repoPermissionPolicy = True try: - # this is a passing finding ecr.get_repository_policy(repositoryName=repoName) + except botocore.exceptions.ClientError: + repoPermissionPolicy = False + + # this is a passing finding + if repoPermissionPolicy is True: finding = { "SchemaVersion": "2018-10-08", "Id": repoArn + "/ecr-repo-access-policy-check", @@ -488,112 +502,112 @@ def ecr_repo_permission_policy_check(cache: dict, session, awsAccountId: str, aw "RecordState": "ARCHIVED", } yield finding - except botocore.exceptions.ClientError as error: - if error.response["Error"]["Code"] == "RepositoryPolicyNotFoundException": - finding = { - "SchemaVersion": "2018-10-08", - "Id": repoArn + "/ecr-repo-access-policy-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": repoArn, - "AwsAccountId": awsAccountId, - "Types": ["Software and Configuration Checks/AWS Security Best Practices"], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "MEDIUM"}, - "Confidence": 99, - "Title": "[ECR.3] ECR repositories should be have a repository policy configured", - "Description": "ECR repository " - + repoName - + " does not have a repository policy configured. Refer to the remediation instructions if this configuration is not intended", - "Remediation": { - "Recommendation": { - "Text": "If your repository should be configured to have a repository policy refer to the Amazon ECR Repository Policies section in the Amazon ECR User Guide", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Containers", - "AssetService": "Amazon Elastic Container Registry", - "AssetComponent": "Repository" - }, - "Resources": [ - { - "Type": "AwsEcrRepository", - "Id": repoArn, - "Partition": awsPartition, - "Region": awsRegion, - "Details": {"Other": {"RepositoryName": repoName}}, - } - ], - "Compliance": { - "Status": "FAILED", - "RelatedRequirements": [ - "NIST CSF V1.1 PR.AC-3", - "NIST CSF V1.1 PR.AC-4", - "NIST CSF V1.1 PR.DS-5", - "NIST SP 800-53 Rev. 4 AC-1", - "NIST SP 800-53 Rev. 4 AC-2", - "NIST SP 800-53 Rev. 4 AC-3", - "NIST SP 800-53 Rev. 
4 AC-4", - "NIST SP 800-53 Rev. 4 AC-5", - "NIST SP 800-53 Rev. 4 AC-6", - "NIST SP 800-53 Rev. 4 AC-14", - "NIST SP 800-53 Rev. 4 AC-16", - "NIST SP 800-53 Rev. 4 AC-17", - "NIST SP 800-53 Rev. 4 AC-19", - "NIST SP 800-53 Rev. 4 AC-20", - "NIST SP 800-53 Rev. 4 AC-24", - "NIST SP 800-53 Rev. 4 PE-19", - "NIST SP 800-53 Rev. 4 PS-3", - "NIST SP 800-53 Rev. 4 PS-6", - "NIST SP 800-53 Rev. 4 SC-7", - "NIST SP 800-53 Rev. 4 SC-8", - "NIST SP 800-53 Rev. 4 SC-13", - "NIST SP 800-53 Rev. 4 SC-15", - "NIST SP 800-53 Rev. 4 SC-31", - "NIST SP 800-53 Rev. 4 SI-4", - "AICPA TSC CC6.3", - "AICPA TSC CC6.6", - "AICPA TSC CC7.2", - "ISO 27001:2013 A.6.1.2", - "ISO 27001:2013 A.6.2.1", - "ISO 27001:2013 A.6.2.2", - "ISO 27001:2013 A.7.1.1", - "ISO 27001:2013 A.7.1.2", - "ISO 27001:2013 A.7.3.1", - "ISO 27001:2013 A.8.2.2", - "ISO 27001:2013 A.8.2.3", - "ISO 27001:2013 A.9.1.1", - "ISO 27001:2013 A.9.1.2", - "ISO 27001:2013 A.9.2.3", - "ISO 27001:2013 A.9.4.1", - "ISO 27001:2013 A.9.4.4", - "ISO 27001:2013 A.9.4.5", - "ISO 27001:2013 A.10.1.1", - "ISO 27001:2013 A.11.1.4", - "ISO 27001:2013 A.11.1.5", - "ISO 27001:2013 A.11.2.1", - "ISO 27001:2013 A.11.2.6", - "ISO 27001:2013 A.13.1.1", - "ISO 27001:2013 A.13.1.3", - "ISO 27001:2013 A.13.2.1", - "ISO 27001:2013 A.13.2.3", - "ISO 27001:2013 A.13.2.4", - "ISO 27001:2013 A.14.1.2", - "ISO 27001:2013 A.14.1.3" - ] - }, - "Workflow": {"Status": "NEW"}, - "RecordState": "ACTIVE", - } - yield finding + # this is a failing finding + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": repoArn + "/ecr-repo-access-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": repoArn, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[ECR.3] ECR repositories should have a repository policy configured", + "Description": "ECR repository " + + repoName + + " does not have a repository policy configured. Refer to the remediation instructions if this configuration is not intended", + "Remediation": { + "Recommendation": { + "Text": "If your repository should be configured to have a repository policy refer to the Amazon ECR Repository Policies section in the Amazon ECR User Guide", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Containers", + "AssetService": "Amazon Elastic Container Registry", + "AssetComponent": "Repository" + }, + "Resources": [ + { + "Type": "AwsEcrRepository", + "Id": repoArn, + "Partition": awsPartition, + "Region": awsRegion, + "Details": {"Other": {"RepositoryName": repoName}}, + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-3", + "NIST CSF V1.1 PR.AC-4", + "NIST CSF V1.1 PR.DS-5", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-4", + "NIST SP 800-53 Rev. 4 AC-5", + "NIST SP 800-53 Rev. 4 AC-6", + "NIST SP 800-53 Rev. 4 AC-14", + "NIST SP 800-53 Rev. 4 AC-16", + "NIST SP 800-53 Rev. 4 AC-17", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 
4 AC-20", + "NIST SP 800-53 Rev. 4 AC-24", + "NIST SP 800-53 Rev. 4 PE-19", + "NIST SP 800-53 Rev. 4 PS-3", + "NIST SP 800-53 Rev. 4 PS-6", + "NIST SP 800-53 Rev. 4 SC-7", + "NIST SP 800-53 Rev. 4 SC-8", + "NIST SP 800-53 Rev. 4 SC-13", + "NIST SP 800-53 Rev. 4 SC-15", + "NIST SP 800-53 Rev. 4 SC-31", + "NIST SP 800-53 Rev. 4 SI-4", + "AICPA TSC CC6.3", + "AICPA TSC CC6.6", + "AICPA TSC CC7.2", + "ISO 27001:2013 A.6.1.2", + "ISO 27001:2013 A.6.2.1", + "ISO 27001:2013 A.6.2.2", + "ISO 27001:2013 A.7.1.1", + "ISO 27001:2013 A.7.1.2", + "ISO 27001:2013 A.7.3.1", + "ISO 27001:2013 A.8.2.2", + "ISO 27001:2013 A.8.2.3", + "ISO 27001:2013 A.9.1.1", + "ISO 27001:2013 A.9.1.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.4.1", + "ISO 27001:2013 A.9.4.4", + "ISO 27001:2013 A.9.4.5", + "ISO 27001:2013 A.10.1.1", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.11.1.5", + "ISO 27001:2013 A.11.2.1", + "ISO 27001:2013 A.11.2.6", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.1.3", + "ISO 27001:2013 A.13.2.1", + "ISO 27001:2013 A.13.2.3", + "ISO 27001:2013 A.13.2.4", + "ISO 27001:2013 A.14.1.2", + "ISO 27001:2013 A.14.1.3" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE", + } + yield finding @registry.register_check("ecr") def ecr_latest_image_vuln_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: @@ -604,175 +618,174 @@ def ecr_latest_image_vuln_check(cache: dict, session, awsAccountId: str, awsRegi for repo in describe_repositories(cache, session): # B64 encode all of the details for the Asset repoName = repo["repositoryName"] - if repo["imageScanningConfiguration"]["scanOnPush"] == True: - try: - for images in ecr.describe_images(repositoryName=repoName, filter={"tagStatus": "TAGGED"}, maxResults=1000,)["imageDetails"]: - assetJson = json.dumps(images,default=str).encode("utf-8") - assetB64 = base64.b64encode(assetJson) - imageDigest = str(images["imageDigest"]) - # use the first tag only as we need it to create the canonical ID for the Resource.Id in the ASFF for the Container Resource.Type - imageTag = str(images["imageTags"][0]) - try: - imageVulnCheck = str( - images["imageScanFindingsSummary"]["findingSeverityCounts"] - ) - except KeyError: - imageVulnCheck = "{}" - # Failing check - if imageVulnCheck != "{}": - finding = { - "SchemaVersion": "2018-10-08", - "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}/ecr-latest-image-vuln-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": imageDigest, - "AwsAccountId": awsAccountId, - "Types": [ - "Software and Configuration Checks/Vulnerabilities/CVE", - "Software and Configuration Checks/AWS Security Best Practices", - ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "MEDIUM"}, - "Confidence": 99, - "Title": "[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities", - "Description": f"The latest image {imageDigest} in the ECR repository {repoName} has {imageVulnCheck} vulnerabilities reported by ECR Basic Scans. The latest image is likely the last used or is likely active in your environment, while container vulnerabilities can be transient and harder to exploit, it is important for your security hygeine and threat reduction that active images are aggressively patched and minimized. 
Refer to the remediation instructions as well as your ECR Basic or Full (Inspector) scan results.", - "Remediation": { - "Recommendation": { - "Text": "For more information about scanning images refer to the Image Scanning section of the Amazon ECR User Guide", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Containers", - "AssetService": "Amazon Elastic Container Registry", - "AssetComponent": "Image" - }, - "Resources": [ - { - "Type": "Container", - "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}", - "Partition": awsPartition, - "Region": awsRegion, - "Details": { - "Container": { - "Name": f"{repoName}:{imageTag}", - "ImageId": imageDigest - } + if repo["imageScanningConfiguration"]["scanOnPush"] is True: + for image in ecr.describe_images(repositoryName=repoName, filter={"tagStatus": "TAGGED"}, maxResults=1000)["imageDetails"]: + assetJson = json.dumps(image,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + imageDigest = image["imageDigest"] + # use the first tag only as we need it to create the canonical ID for the Resource.Id in the ASFF for the Container Resource.Type + imageTag = image["imageTags"][0] + + # Evaluate if there are any vulnerabilities + imageHasVulns = False + try: + imageVulnCheck = image["imageScanFindingsSummary"]["findingSeverityCounts"] + imageHasVulns = True + except KeyError: + imageHasVulns = False + + # This is a failing check + if imageHasVulns is True: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}/ecr-latest-image-vuln-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": imageDigest, + "AwsAccountId": awsAccountId, + "Types": [ + "Software and Configuration Checks/Vulnerabilities/CVE", + "Software and Configuration Checks/AWS Security Best Practices", + ], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities", + "Description": f"The latest image {imageDigest} in the ECR repository {repoName} has {imageVulnCheck} vulnerabilities reported by ECR Basic Scans. The latest image is likely the last used or is likely active in your environment, while container vulnerabilities can be transient and harder to exploit, it is important for your security hygiene and threat reduction that active images are aggressively patched and minimized. 
Refer to the remediation instructions as well as your ECR Basic or Full (Inspector) scan results.", + "Remediation": { + "Recommendation": { + "Text": "For more information about scanning images refer to the Image Scanning section of the Amazon ECR User Guide", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Containers", + "AssetService": "Amazon Elastic Container Registry", + "AssetComponent": "Image" + }, + "Resources": [ + { + "Type": "Container", + "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}", + "Partition": awsPartition, + "Region": awsRegion, + "Details": { + "Container": { + "Name": f"{repoName}:{imageTag}", + "ImageId": imageDigest } } - ], - "Compliance": { - "Status": "FAILED", - "RelatedRequirements": [ - "NIST CSF V1.1 DE.CM-8", - "NIST CSF V1.1 ID.RA-1", - "NIST SP 800-53 Rev. 4 CA-2", - "NIST SP 800-53 Rev. 4 CA-7", - "NIST SP 800-53 Rev. 4 CA-8", - "NIST SP 800-53 Rev. 4 RA-3", - "NIST SP 800-53 Rev. 4 RA-5", - "NIST SP 800-53 Rev. 4 SA-5", - "NIST SP 800-53 Rev. 4 SA-11", - "NIST SP 800-53 Rev. 4 SI-2", - "NIST SP 800-53 Rev. 4 SI-4", - "NIST SP 800-53 Rev. 4 SI-5", - "AICPA TSC CC3.2", - "AICPA TSC CC7.1", - "ISO 27001:2013 A.12.6.1", - "ISO 27001:2013 A.12.6.4", - "ISO 27001:2013 A.18.2.3" - ] - }, - "Workflow": {"Status": "NEW"}, - "RecordState": "ACTIVE" - } - yield finding - else: - finding = { - "SchemaVersion": "2018-10-08", - "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}/ecr-latest-image-vuln-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": imageDigest, - "AwsAccountId": awsAccountId, - "Types": [ - "Software and Configuration Checks/Vulnerabilities/CVE", - "Software and Configuration Checks/AWS Security Best Practices", - ], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "INFORMATIONAL"}, - "Confidence": 99, - "Title": "[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities", - "Description": f"The latest image {imageDigest} in the ECR repository {repoName} does not have any vulnerabilities reported, good job!.", - "Remediation": { - "Recommendation": { - "Text": "For more information about scanning images refer to the Image Scanning section of the Amazon ECR User Guide", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html", - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Containers", - "AssetService": "Amazon Elastic Container Registry", - "AssetComponent": "Image" - }, - "Resources": [ - { - "Type": "Container", - "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}", - "Partition": awsPartition, - "Region": awsRegion, - "Details": { - "Container": { - "Name": f"{repoName}:{imageTag}", - "ImageId": imageDigest - } + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 DE.CM-8", + "NIST CSF V1.1 ID.RA-1", + "NIST SP 800-53 Rev. 4 CA-2", + "NIST SP 800-53 Rev. 
4 CA-7", + "NIST SP 800-53 Rev. 4 CA-8", + "NIST SP 800-53 Rev. 4 RA-3", + "NIST SP 800-53 Rev. 4 RA-5", + "NIST SP 800-53 Rev. 4 SA-5", + "NIST SP 800-53 Rev. 4 SA-11", + "NIST SP 800-53 Rev. 4 SI-2", + "NIST SP 800-53 Rev. 4 SI-4", + "NIST SP 800-53 Rev. 4 SI-5", + "AICPA TSC CC3.2", + "AICPA TSC CC7.1", + "ISO 27001:2013 A.12.6.1", + "ISO 27001:2013 A.12.6.4", + "ISO 27001:2013 A.18.2.3" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + # This is a passing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}/ecr-latest-image-vuln-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": imageDigest, + "AwsAccountId": awsAccountId, + "Types": [ + "Software and Configuration Checks/Vulnerabilities/CVE", + "Software and Configuration Checks/AWS Security Best Practices", + ], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities", + "Description": f"The latest image {imageDigest} in the ECR repository {repoName} does not have any vulnerabilities reported, good job!", + "Remediation": { + "Recommendation": { + "Text": "For more information about scanning images refer to the Image Scanning section of the Amazon ECR User Guide", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html", + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Containers", + "AssetService": "Amazon Elastic Container Registry", + "AssetComponent": "Image" + }, + "Resources": [ + { + "Type": "Container", + "Id": f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:image/{repoName}:{imageTag}", + "Partition": awsPartition, + "Region": awsRegion, + "Details": { + "Container": { + "Name": f"{repoName}:{imageTag}", + "ImageId": imageDigest } } - ], - "Compliance": { - "Status": "PASSED", - "RelatedRequirements": [ - "NIST CSF V1.1 DE.CM-8", - "NIST CSF V1.1 ID.RA-1", - "NIST SP 800-53 Rev. 4 CA-2", - "NIST SP 800-53 Rev. 4 CA-7", - "NIST SP 800-53 Rev. 4 CA-8", - "NIST SP 800-53 Rev. 4 RA-3", - "NIST SP 800-53 Rev. 4 RA-5", - "NIST SP 800-53 Rev. 4 SA-5", - "NIST SP 800-53 Rev. 4 SA-11", - "NIST SP 800-53 Rev. 4 SI-2", - "NIST SP 800-53 Rev. 4 SI-4", - "NIST SP 800-53 Rev. 4 SI-5", - "AICPA TSC CC3.2", - "AICPA TSC CC7.1", - "ISO 27001:2013 A.12.6.1", - "ISO 27001:2013 A.12.6.4", - "ISO 27001:2013 A.18.2.3" - ] - }, - "Workflow": {"Status": "RESOLVED"}, - "RecordState": "ARCHIVED" - } - yield finding - except Exception as e: - print(e) - else: - pass + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 DE.CM-8", + "NIST CSF V1.1 ID.RA-1", + "NIST SP 800-53 Rev. 4 CA-2", + "NIST SP 800-53 Rev. 4 CA-7", + "NIST SP 800-53 Rev. 4 CA-8", + "NIST SP 800-53 Rev. 4 RA-3", + "NIST SP 800-53 Rev. 4 RA-5", + "NIST SP 800-53 Rev. 4 SA-5", + "NIST SP 800-53 Rev. 4 SA-11", + "NIST SP 800-53 Rev. 4 SI-2", + "NIST SP 800-53 Rev. 4 SI-4", + "NIST SP 800-53 Rev. 
4 SI-5", + "AICPA TSC CC3.2", + "AICPA TSC CC7.1", + "ISO 27001:2013 A.12.6.1", + "ISO 27001:2013 A.12.6.4", + "ISO 27001:2013 A.18.2.3" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding @registry.register_check("ecr") def ecr_registry_policy_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: @@ -780,11 +793,19 @@ def ecr_registry_policy_check(cache: dict, session, awsAccountId: str, awsRegion ecr = session.client("ecr") iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() registryArn = f"arn:{awsPartition}:ecr:{awsRegion}:{awsAccountId}:registry" + + # determine if a registry policy is configured + ecrRegistryPolicy = True try: policy = ecr.get_registry_policy() # B64 encode all of the details for the Asset assetJson = json.dumps(policy,default=str).encode("utf-8") assetB64 = base64.b64encode(assetJson) + except botocore.exceptions.ClientError: + ecrRegistryPolicy = False + assetB64 = None + + if ecrRegistryPolicy is True: # This is a passing check finding = { "SchemaVersion": "2018-10-08", @@ -863,91 +884,85 @@ def ecr_registry_policy_check(cache: dict, session, awsAccountId: str, awsRegion "RecordState": "ARCHIVED", } yield finding - except botocore.exceptions.ClientError as error: - if error.response["Error"]["Code"] == "RegistryPolicyNotFoundException": - assetB64 = None - # this is a failing check - finding = { - "SchemaVersion": "2018-10-08", - "Id": f"{registryArn}/ecr-registry-access-policy-check", - "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", - "GeneratorId": awsAccountId + awsRegion, - "AwsAccountId": awsAccountId, - "Types": ["Software and Configuration Checks/AWS Security Best Practices"], - "FirstObservedAt": iso8601Time, - "CreatedAt": iso8601Time, - "UpdatedAt": iso8601Time, - "Severity": {"Label": "LOW"}, - "Confidence": 99, - "Title": "[ECR.5] ECR Registires should be have a registry policy configured to allow for cross-account recovery", - "Description": "ECR Registry " - + awsAccountId - + " in Region " - + awsRegion - + " does not have a registry policy configured. ECR uses a registry policy to grant permissions to an AWS principal, allowing the replication of the repositories from a source registry to your registry. By default, you have permission to configure cross-Region replication within your own registry. You only need to configure the registry policy if you're granting another account permission to replicate contents to your registry. 
Refer to the remediation instructions if this configuration is not intended", - "Remediation": { - "Recommendation": { - "Text": "If your Registry should be configured to have a Registry policy refer to the Private registry permissions section in the Amazon ECR User Guide", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html" - } - }, - "ProductFields": { - "ProductName": "ElectricEye", - "Provider": "AWS", - "ProviderType": "CSP", - "ProviderAccountId": awsAccountId, - "AssetRegion": awsRegion, - "AssetDetails": assetB64, - "AssetClass": "Containers", - "AssetService": "Amazon Elastic Container Registry", - "AssetComponent": "Registry" - }, - "Resources": [ - { - "Type": "AwsEcrRegistry", - "Id": registryArn, - "Partition": awsPartition, - "Region": awsRegion, - "Details": {"Other": {"RegistryId": awsAccountId}}, - } - ], - "Compliance": { - "Status": "FAILED", - "RelatedRequirements": [ - "NIST CSF V1.1 ID.BE-5", - "NIST CSF V1.1 PR.IP-4", - "NIST CSF V1.1 PR.PT-5", - "NIST SP 800-53 Rev. 4 CP-2", - "NIST SP 800-53 Rev. 4 CP-4", - "NIST SP 800-53 Rev. 4 CP-6", - "NIST SP 800-53 Rev. 4 CP-7", - "NIST SP 800-53 Rev. 4 CP-8", - "NIST SP 800-53 Rev. 4 CP-9", - "NIST SP 800-53 Rev. 4 CP-11", - "NIST SP 800-53 Rev. 4 CP-13", - "NIST SP 800-53 Rev. 4 PL-8", - "NIST SP 800-53 Rev. 4 SA-14", - "NIST SP 800-53 Rev. 4 SC-6", - "AICPA TSC A1.2", - "AICPA TSC A1.3", - "AICPA TSC CC3.1", - "ISO 27001:2013 A.11.1.4", - "ISO 27001:2013 A.12.3.1", - "ISO 27001:2013 A.17.1.1", - "ISO 27001:2013 A.17.1.2", - "ISO 27001:2013 A.17.1.3", - "ISO 27001:2013 A.17.2.1", - "ISO 27001:2013 A.18.1.3" - ] - }, - "Workflow": {"Status": "NEW"}, - "RecordState": "ACTIVE", - } - yield finding - else: - print(error) - except Exception as e: - print(e)
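For reference on what would flip this check to passing: a registry policy is a single resource policy per Region that grants another principal the replication actions. A minimal sketch with boto3; the account IDs, Region, and statement are placeholders:

```python
import json

import boto3

ecr = boto3.client("ecr")

# Hypothetical policy: let account 111122223333 replicate into this registry
registry_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowCrossAccountReplication",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
            "Action": ["ecr:CreateRepository", "ecr:ReplicateImage"],
            "Resource": "arn:aws:ecr:us-east-1:444455556666:repository/*"
        }
    ]
}

# One registry policy exists per account per Region
ecr.put_registry_policy(policyText=json.dumps(registry_policy))
```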
+ else: + # this is a failing check + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{registryArn}/ecr-registry-access-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": awsAccountId + awsRegion, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[ECR.5] ECR Registries should have a registry policy configured to allow for cross-account recovery", + "Description": "ECR Registry " + + awsAccountId + + " in Region " + + awsRegion + + " does not have a registry policy configured. ECR uses a registry policy to grant permissions to an AWS principal, allowing the replication of the repositories from a source registry to your registry. By default, you have permission to configure cross-Region replication within your own registry. You only need to configure the registry policy if you're granting another account permission to replicate contents to your registry. Refer to the remediation instructions if this configuration is not intended", + "Remediation": { + "Recommendation": { + "Text": "If your Registry should be configured to have a Registry policy refer to the Private registry permissions section in the Amazon ECR User Guide", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "AWS", + "ProviderType": "CSP", + "ProviderAccountId": awsAccountId, + "AssetRegion": awsRegion, + "AssetDetails": assetB64, + "AssetClass": "Containers", + "AssetService": "Amazon Elastic Container Registry", + "AssetComponent": "Registry" + }, + "Resources": [ + { + "Type": "AwsEcrRegistry", + "Id": registryArn, + "Partition": awsPartition, + "Region": awsRegion, + "Details": {"Other": {"RegistryId": awsAccountId}}, + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.BE-5", + "NIST CSF V1.1 PR.IP-4", + "NIST CSF V1.1 PR.PT-5", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 CP-4", + "NIST SP 800-53 Rev. 4 CP-6", + "NIST SP 800-53 Rev. 4 CP-7", + "NIST SP 800-53 Rev. 4 CP-8", + "NIST SP 800-53 Rev. 4 CP-9", + "NIST SP 800-53 Rev. 4 CP-11", + "NIST SP 800-53 Rev. 4 CP-13", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SA-14", + "NIST SP 800-53 Rev. 4 SC-6", + "AICPA TSC A1.2", + "AICPA TSC A1.3", + "AICPA TSC CC3.1", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.12.3.1", + "ISO 27001:2013 A.17.1.1", + "ISO 27001:2013 A.17.1.2", + "ISO 27001:2013 A.17.1.3", + "ISO 27001:2013 A.17.2.1", + "ISO 27001:2013 A.18.1.3" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE", + } + yield finding @registry.register_check("ecr") def ecr_registry_backup_rules_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: diff --git a/eeauditor/auditors/aws/Amazon_S3_Auditor.py b/eeauditor/auditors/aws/Amazon_S3_Auditor.py index 9d0be48c..48006759 100644 --- a/eeauditor/auditors/aws/Amazon_S3_Auditor.py +++ b/eeauditor/auditors/aws/Amazon_S3_Auditor.py @@ -779,12 +779,14 @@ def aws_s3_bucket_policy_check(cache: dict, session, awsAccountId: str, awsRegio assetB64 = base64.b64encode(assetJson) bucketName = buckets["Name"] s3Arn = f"arn:{awsPartition}:s3:::{bucketName}" + + bucketHasPolicy = True # Check to see if there is a policy at all try: s3.get_bucket_policy(Bucket=bucketName) - bucketHasPolicy = True except ClientError: bucketHasPolicy = False + # this is a failing check if bucketHasPolicy is False: finding = { @@ -1013,6 +1015,7 @@ def aws_s3_bucket_access_logging_check(cache: dict, session, awsAccountId: str, assetB64 = base64.b64encode(assetJson) bucketName = buckets["Name"] s3Arn = f"arn:{awsPartition}:s3:::{bucketName}" + # attempt to get server access logging try: s3.get_bucket_logging(Bucket=bucketName)["LoggingEnabled"] @@ -1021,6 +1024,7 @@ def aws_s3_bucket_access_logging_check(cache: dict, session, awsAccountId: str, bucketServerLogging = False except KeyError: bucketServerLogging = False + # this is a passing check if bucketServerLogging is True: finding = { @@ -1222,6 +1226,7 @@ def s3_account_level_block(cache: dict, session, awsAccountId: str, awsRegion: s # B64 encode all of the details for the Asset assetJson = json.dumps(blocker,default=str).encode("utf-8") assetB64 = base64.b64encode(assetJson) + # If they're all True it's good if ( blocker["BlockPublicAcls"] @@ -1232,7 +1237,7 @@ def s3_account_level_block(cache: dict, session, 
awsAccountId: str, awsRegion: s accountPublicBlock = True else: accountPublicBlock = False - except Exception: + except (ClientError, Exception): accountPublicBlock = False assetB64 = None @@ -1484,7 +1489,7 @@ def aws_s3_bucket_deny_http_access_check(cache: dict, session, awsAccountId: str blockHttpObjectAccess = False # This is a failing check - if blockHttpObjectAccess is not True: + if blockHttpObjectAccess is False: finding = { "SchemaVersion": "2018-10-08", "Id": f"{s3Arn}/s3-bucket-block-insecure-http-access-check", diff --git a/eeauditor/auditors/snowflake/Snowflake_Account_Auditor.py b/eeauditor/auditors/snowflake/Snowflake_Account_Auditor.py new file mode 100644 index 00000000..e8cc7a83 --- /dev/null +++ b/eeauditor/auditors/snowflake/Snowflake_Account_Auditor.py @@ -0,0 +1,2035 @@ +#This file is part of ElectricEye. +#SPDX-License-Identifier: Apache-2.0 + +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +#http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, +#software distributed under the License is distributed on an +#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +#KIND, either express or implied. See the License for the +#specific language governing permissions and limitations +#under the License. + +import logging +from datetime import datetime, timezone, UTC +from snowflake.connector import cursor +from check_register import CheckRegister +import base64 +import json + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("SnowflakeAccountAuditor") + +registry = CheckRegister() + +def get_snowflake_security_integrations(cache: dict, snowflakeCursor: cursor.SnowflakeCursor) -> dict: + """ + Get the Snowflake security integrations for the account from the SHOW INTEGRATIONS query. + """ + response = cache.get("get_snowflake_security_integrations") + if response: + return response + + query = "SHOW INTEGRATIONS" + + cache["get_snowflake_security_integrations"] = snowflakeCursor.execute(query).fetchall() + + return cache["get_snowflake_security_integrations"] + 
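One assumption in this new auditor worth spelling out: the helper above and the checks below index result rows by column name (for example `integ["type"]`), which only works if the caller hands in a dictionary-returning cursor rather than the connector's default tuple cursor. A minimal sketch of that setup; the connection parameters are placeholders, as ElectricEye resolves its own from the TOML config:

```python
import snowflake.connector
from snowflake.connector import DictCursor

# Placeholder credentials for illustration only
conn = snowflake.connector.connect(
    user="EXAMPLE_USER",
    password="example-password",
    account="myorg-myaccount",
)

# DictCursor yields each row as a dict keyed by column name,
# which is what the row["type"] style lookups in these checks rely on
snowflakeCursor = conn.cursor(DictCursor)

for row in snowflakeCursor.execute("SHOW INTEGRATIONS").fetchall():
    print(row["name"], row["type"])
```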
+def get_snowflake_password_policy(cache: dict, snowflakeCursor: cursor.SnowflakeCursor) -> dict: + """ + Get the Snowflake password policy for the account from the ACCOUNT_USAGE.PASSWORD_POLICIES view. + """ + response = cache.get("get_snowflake_password_policy") + if response: + return response + + query = "SELECT * FROM SNOWFLAKE.ACCOUNT_USAGE.PASSWORD_POLICIES" + + cache["get_snowflake_password_policy"] = snowflakeCursor.execute(query).fetchall() + + return cache["get_snowflake_password_policy"] + +@registry.register_check("snowflake.account") +def snowflake_account_sso_enabled_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.1] Snowflake Accounts have Single Sign-On (SSO) enabled""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + payload = get_snowflake_security_integrations(cache, snowflakeCursor) + + ssoCheck = [integ for integ in payload if "saml" in str(integ["type"]).lower() or "oauth" in str(integ["type"]).lower()] + + # B64 encode all of the details for the Asset + assetJson = json.dumps(ssoCheck,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if ssoCheck: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-sso-enabled-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.1] Snowflake Accounts have Single Sign-On (SSO) enabled", + "Description": f"Snowflake account {snowflakeAccountId} has Single Sign-On (SSO) enabled via either SAML or External OAuth.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up federated authentication or SSO in Snowflake refer to the Overview of federated authentication and SSO section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-security-fed-auth-overview" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-6", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-16", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-24", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 PE-2", + "NIST SP 800-53 Rev. 
4 PS-3", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.7.1.1", + "ISO 27001:2013 A.9.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3", + "CIS Snowflake Foundations Benchmark V1.0.0 2.5" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-sso-enabled-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[Snowflake.Account.1] Snowflake Accounts have Single Sign-On (SSO) enabled", + "Description": f"Snowflake account {snowflakeAccountId} does not have Single Sign-On (SSO) enabled via either SAML or External OAuth. Federated authentication enables users to connect to Snowflake using secure SSO (single sign-on). With SSO enabled, users authenticate through an external (SAML 2.0-compliant or OAuth 2.0) identity provider (IdP). Once authenticated by an IdP, users can access their Snowflake account for the duration of their IdP session without having to authenticate to Snowflake again. Users can choose to initiate their sessions from within the interface provided by the IdP or directly in Snowflake. Configuring your Snowflake authentication so that users can log in using SSO reduces the attack surface for your organization because users only log in once across multiple applications and do not have to manage a separate set of credentials for their Snowflake account.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up federated authentication or SSO in Snowflake refer to the Overview of federated authentication and SSO section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-security-fed-auth-overview" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-6", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-16", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-24", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 PE-2", + "NIST SP 800-53 Rev. 
4 PS-3", + "AICPA TSC CC6.1", + "ISO 27001:2013 A.7.1.1", + "ISO 27001:2013 A.9.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3", + "CIS Snowflake Foundations Benchmark V1.0.0 2.5" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_account_scim_enabled_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.2] Snowflake Accounts have SCIM enabled""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + payload = get_snowflake_security_integrations(cache, snowflakeCursor) + + scimCheck = [integ for integ in payload if str(integ["type"]).lower() == "scim"] + + # B64 encode all of the details for the Asset + assetJson = json.dumps(scimCheck,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if scimCheck: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-scim-enabled-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.2] Snowflake Accounts have SCIM enabled", + "Description": f"Snowflake account {snowflakeAccountId} has System for Cross-domain Identity Management (SCIM) enabled.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up federated authentication or SSO in Snowflake refer to the Overview of federated authentication and SSO section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-security-fed-auth-overview" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.BE-5", + "NIST CSF V1.1 PR.DS-4", + "NIST CSF V1.1 PR.PT-5", + "NIST SP 800-53 Rev. 4 AU-4", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 CP-7", + "NIST SP 800-53 Rev. 4 CP-8", + "NIST SP 800-53 Rev. 4 CP-11", + "NIST SP 800-53 Rev. 4 CP-13", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SA-14", + "NIST SP 800-53 Rev. 4 SC-5", + "NIST SP 800-53 Rev. 
4 SC-6", + "AICPA TSC CC3.1", + "AICPA TSC A1.1", + "AICPA TSC A1.2", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.12.3.1", + "ISO 27001:2013 A.17.1.1", + "ISO 27001:2013 A.17.1.2", + "ISO 27001:2013 A.17.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.2", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3", + "CIS Snowflake Foundations Benchmark V1.0.0 2.5", + "CIS Snowflake Foundations Benchmark V1.0.0 2.7" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-scim-enabled-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.2] Snowflake Accounts have SCIM enabled", + "Description": f"Snowflake account {snowflakeAccountId} does not have System for Cross-domain Identity Management (SCIM) enabled. SCIM is an open specification designed to help facilitate the automated management of user identities and groups (i.e. roles) in cloud applications using RESTful APIs. Snowflake supports SCIM 2.0 integration with Okta, Microsoft Azure AD and custom identity providers. Users and groups from the identity provider can be provisioned into Snowflake, which functions as the service provider. While SSO enables seamless authentication with a federated identity to the Snowflake application, user accounts still need to be created, managed, and deprovisioned. Operations like adding and deleting users, changing permissions, and adding new types of accounts usually take up valuable admin time and when done manually may be error-prone. With SCIM, user identities can be created either directly in your identity provider, or imported from external systems like HR software or Active Directory. SCIM enables IT departments to automate the user provisioning and deprovisioning process while also having a single system to manage permissions and groups. Since data is transferred automatically, risk of error is reduced.", + "Remediation": { + "Recommendation": { + "Text": "For information on setting up SCIM in Snowflake refer to the CREATE SECURITY INTEGRATION (SCIM) section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-scim#examples" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.BE-5", + "NIST CSF V1.1 PR.DS-4", + "NIST CSF V1.1 PR.PT-5", + "NIST SP 800-53 Rev. 4 AU-4", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 CP-7", + "NIST SP 800-53 Rev. 4 CP-8", + "NIST SP 800-53 Rev. 4 CP-11", + "NIST SP 800-53 Rev. 4 CP-13", + "NIST SP 800-53 Rev. 
4 PL-8", + "NIST SP 800-53 Rev. 4 SA-14", + "NIST SP 800-53 Rev. 4 SC-5", + "NIST SP 800-53 Rev. 4 SC-6", + "AICPA TSC CC3.1", + "AICPA TSC A1.1", + "AICPA TSC A1.2", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.12.3.1", + "ISO 27001:2013 A.17.1.1", + "ISO 27001:2013 A.17.1.2", + "ISO 27001:2013 A.17.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.2", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3", + "CIS Snowflake Foundations Benchmark V1.0.0 2.5", + "CIS Snowflake Foundations Benchmark V1.0.0 2.7" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + 
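Before the next check: remediating it means attaching a session policy with an idle timeout of 15 minutes or less to every user holding ACCOUNTADMIN or SECURITYADMIN. A minimal sketch of the DDL, issued through the same cursor these checks receive; the database, schema, policy, and user names are illustrative:

```python
# Assumes a snowflake.connector cursor; every object name below is an example
ddl_statements = [
    # A session policy is a schema-level object
    "CREATE SESSION POLICY IF NOT EXISTS SECURITY.POLICIES.ADMIN_SESSION_POLICY "
    "SESSION_IDLE_TIMEOUT_MINS = 15",
    # Attach it to a privileged user so the timeout actually applies
    "ALTER USER EXAMPLE_ADMIN SET SESSION POLICY SECURITY.POLICIES.ADMIN_SESSION_POLICY",
]

for statement in ddl_statements:
    snowflakeCursor.execute(statement)
```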
"AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.IP-7", + "NIST CSF V1.1 RS.AN-1", + "NIST SP 800-53 Rev. 4 CA-2", + "NIST SP 800-53 Rev. 4 CA-7", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 IR-8", + "NIST SP 800-53 Rev. 4 PL-2", + "NIST SP 800-53 Rev. 4 PM-6", + "NIST SP 800-53 Rev. 4 AU-6", + "NIST SP 800-53 Rev. 4 IR-4", + "NIST SP 800-53 Rev. 4 IR-5", + "NIST SP 800-53 Rev. 4 PE-6", + "NIST SP 800-53 Rev. 4 SI-4", + "AICPA TSC CC4.2", + "AICPA TSC CC5.1", + "AICPA TSC CC5.3", + "AICPA TSC CC7.3", + "ISO 27001:2013 A.12.4.1", + "ISO 27001:2013 A.12.4.3", + "ISO 27001:2013 A.16.1.5", + "CIS Snowflake Foundations Benchmark V1.0.0 1.9", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-admin-session-timeout-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDUIM"}, + "Confidence": 99, + "Title": "[Snowflake.Account.3] Snowflake Accounts should ensure that admins roles have a 15 minute session timeout", + "Description": f"Snowflake account {snowflakeAccountId} does not configure session timeouts to 15 minutes or less for all users with SECURITYADMIN and/or ACCOUNTADMIN roles. A session begins when a user connects to Snowflake and authenticates successfully using a Snowflake programmatic client, Snowsight, or the classic web interface. A session is maintained indefinitely with continued user activity. After a period of inactivity in the session, known as the idle session timeout, the user must authenticate to Snowflake again. Session policies can be used to modify the idle session timeout period. The idle session timeout has a maximum value of four hours. Tightening up the idle session timeout reduces sensitive data exposure risk when users forget to sign out of Snowflake and an unauthorized person gains access to their device. 
For more information on session policies in Snowflake refer to the Session Policies section of the Snowflake Documentation Portal.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up session policies in Snowflake refer to the Snowflake Sessions & Session Policies section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/session-policies" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.IP-7", + "NIST CSF V1.1 RS.AN-1", + "NIST SP 800-53 Rev. 4 CA-2", + "NIST SP 800-53 Rev. 4 CA-7", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 IR-8", + "NIST SP 800-53 Rev. 4 PL-2", + "NIST SP 800-53 Rev. 4 PM-6", + "NIST SP 800-53 Rev. 4 AU-6", + "NIST SP 800-53 Rev. 4 IR-4", + "NIST SP 800-53 Rev. 4 IR-5", + "NIST SP 800-53 Rev. 4 PE-6", + "NIST SP 800-53 Rev. 4 SI-4", + "AICPA TSC CC4.2", + "AICPA TSC CC5.1", + "AICPA TSC CC5.3", + "AICPA TSC CC7.3", + "ISO 27001:2013 A.12.4.1", + "ISO 27001:2013 A.12.4.3", + "ISO 27001:2013 A.16.1.5", + "CIS Snowflake Foundations Benchmark V1.0.0 1.9", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_built_in_admin_roles_not_in_custom_role_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.4] Snowflake custom roles should not use built-in admin roles""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT GRANTEE_NAME AS CUSTOM_ROLE, PRIVILEGE AS GRANTED_PRIVILEGE, NAME AS GRANTED_ROLE FROM SNOWFLAKE.ACCOUNT_USAGE.GRANTS_TO_ROLES WHERE GRANTED_ON = 'ROLE' AND NAME IN ('ACCOUNTADMIN','SECURITYADMIN') AND DELETED_ON IS NULL + """ + + q = snowflakeCursor.execute(query).fetchall() + # execute the CIS query, works pretty well for this too, the query should only return a single row: [{'CUSTOM_ROLE': 'ACCOUNTADMIN', 'GRANTED_PRIVILEGE': 'USAGE', 'GRANTED_ROLE': 'SECURITYADMIN'}]. 
If there is more than one entry in the returned list, or the entry does not match this, it's a fail + builtInAdminNotUsedInCustomRole = False + if len(q) == 1: + if q[0]["CUSTOM_ROLE"] == "ACCOUNTADMIN" and q[0]["GRANTED_PRIVILEGE"] == "USAGE" and q[0]["GRANTED_ROLE"] == "SECURITYADMIN": + builtInAdminNotUsedInCustomRole = True + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if builtInAdminNotUsedInCustomRole is True: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-built-in-admin-roles-in-custom-roles-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.4] Snowflake custom roles should not use built-in admin roles", + "Description": f"Snowflake account {snowflakeAccountId} does not use SECURITYADMIN and/or ACCOUNTADMIN roles within custom roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up custom roles and general access control in Snowflake refer to the Overview of Access Control section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-access-control-overview" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-4", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-5", + "NIST SP 800-53 Rev. 4 AC-6", + "NIST SP 800-53 Rev. 
4 AC-16", + "AICPA TSC CC6.3", + "ISO 27001:2013 A.6.1.2", + "ISO 27001:2013 A.9.1.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.4.1", + "ISO 27001:2013 A.9.4.4", + "CIS Snowflake Foundations Benchmark V1.0.0 1.13", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-admin-session-timeout-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.4] Snowflake custom roles should not use built-in admin roles", + "Description": f"Snowflake account {snowflakeAccountId} uses SECURITYADMIN and/or ACCOUNTADMIN roles within custom roles. The principle of least privilege requires that every identity is only given privileges that are necessary to complete its tasks. The ACCOUNTADMIN system role is the most powerful role in a Snowflake account and is intended for performing initial setup and managing account-level objects. SECURITYADMIN role can trivially escalate their privileges to that of ACCOUNTADMIN. Neither of these roles should be used for performing daily non-administrative tasks in a Snowflake account. Granting ACCOUNTADMIN role to any custom role effectively elevates privileges of that role to the ACCOUNTADMIN role privileges. Roles that include the ACCOUNTADMIN role can then be mistakenly used in access grants that do not require ACCOUNTADMIN privileges thus violating the principle of least privilege and increasing the attack surface. The same logic applies to the SECURITYADMIN role. For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for setting up custom roles and general access control in Snowflake refer to the Overview of Access Control section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-access-control-overview" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-4", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-5", + "NIST SP 800-53 Rev. 4 AC-6", + "NIST SP 800-53 Rev. 
4 AC-16", + "AICPA TSC CC6.3", + "ISO 27001:2013 A.6.1.2", + "ISO 27001:2013 A.9.1.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.4.1", + "ISO 27001:2013 A.9.4.4", + "CIS Snowflake Foundations Benchmark V1.0.0 1.13", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_tasks_not_owned_by_admins_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.5] Snowflake tasks should not be owned by ACCOUNTADMIN or SECURITYADMIN roles""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT NAME AS STORED_PROCEDURE_NAME, GRANTED_TO, GRANTEE_NAME AS ROLE_NAME, PRIVILEGE FROM SNOWFLAKE.ACCOUNT_USAGE.GRANTS_TO_ROLES WHERE GRANTED_ON = 'TASK' AND DELETED_ON IS NULL AND GRANTED_TO = 'ROLE' AND PRIVILEGE = 'OWNERSHIP' AND GRANTEE_NAME IN ('ACCOUNTADMIN' , 'SECURITYADMIN') + """ + # as long as this returns an empty list, it's a pass + q = snowflakeCursor.execute(query).fetchall() + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if not q: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-tasks-owned-by-default-admin-roles-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.5] Snowflake tasks should not be owned by ACCOUNTADMIN or SECURITYADMIN roles", + "Description": f"Snowflake account {snowflakeAccountId} does not have any tasks owned by SECURITYADMIN and/or ACCOUNTADMIN roles. This check does not take into account tasks owned by custom roles using the ACCOUNTADMIN or SECURITYADMIN roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Task ownership and general security best practices in Snowflake refer to the Manage task graph ownership section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/tasks-graphs#label-task-dag-ownership" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 
4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.14", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-tasks-owned-by-default-admin-roles-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.5] Snowflake tasks should not be owned by ACCOUNTADMIN or SECURITYADMIN roles", + "Description": f"Snowflake account {snowflakeAccountId} has at least one task owned by SECURITYADMIN and/or ACCOUNTADMIN roles. The ACCOUNTADMIN system role is the most powerful role in a Snowflake account and is intended for performing initial setup and managing account-level objects. SECURITYADMIN role can trivially escalate their privileges to that of ACCOUNTADMIN. Neither of these roles should be used for running Snowflake tasks. A task should be running using a custom role containing only those privileges that are necessary for successful execution of the task. Snowflake executes tasks with the privileges of the task owner. The role that has OWNERSHIP privilege on the task owns the task. To avoid granting a task inappropriate privileges, the OWNERSHIP privilege on the task run as owner should be assigned to a custom role containing only those privileges that are necessary for successful execution of the task. This check does not take into account tasks owned by custom roles using the ACCOUNTADMIN or SECURITYADMIN roles. For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Task ownership and general security best practices in Snowflake refer to the Manage task graph ownership section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/tasks-graphs#label-task-dag-ownership" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 
4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.14", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_tasks_not_running_with_admin_privs_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.6] Snowflake tasks should not run with ACCOUNTADMIN or SECURITYADMIN role privileges""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT NAME AS STORED_PROCEDURE_NAME, GRANTED_TO, GRANTEE_NAME AS ROLE_NAME, PRIVILEGE FROM SNOWFLAKE.ACCOUNT_USAGE.GRANTS_TO_ROLES WHERE GRANTED_ON = 'TASK' AND DELETED_ON IS NULL AND GRANTED_TO = 'ROLE' AND GRANTEE_NAME IN ('ACCOUNTADMIN' , 'SECURITYADMIN') + """ + # as usual, as long as this returns an empty list, it's a pass + q = snowflakeCursor.execute(query).fetchall() + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if not q: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-tasks-run-with-admin-privileges-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.6] Snowflake tasks should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} does not have any tasks that run with ACCOUNTADMIN or SECURITYADMIN role privileges. 
This check does not take into account tasks running with custom roles using the ACCOUNTADMIN or SECURITYADMIN roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Task ownership and general security best practices in Snowflake refer to the Manage task graph ownership section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/tasks-graphs#label-task-dag-ownership" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.15", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-tasks-run-with-admin-privileges-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.6] Snowflake tasks should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} has at least one task that runs with ACCOUNTADMIN or SECURITYADMIN role privileges. The ACCOUNTADMIN system role is the most powerful role in a Snowflake account and is intended for performing initial setup and managing account-level objects. The SECURITYADMIN role can trivially escalate its privileges to those of ACCOUNTADMIN. Neither of these roles should be used for running Snowflake tasks. A task should run using a custom role containing only those privileges that are necessary for successful execution of the task. If a threat actor finds a way to influence or hijack the task execution flow, they may be able to exploit privileges given to the task. In the case of the ACCOUNTADMIN or SECURITYADMIN roles, that may lead to a full account takeover. Additionally, a mistake in the task implementation coupled with excessive privileges may lead to a reliability incident, e.g. accidentally dropping database objects. This check does not take into account tasks running with custom roles using the ACCOUNTADMIN or SECURITYADMIN roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Task ownership and general security best practices in Snowflake refer to the Manage task graph ownership section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/tasks-graphs#label-task-dag-ownership" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.15", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding +
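+# A hedged illustrative sketch, not part of the check logic: inspecting and revoking task privileges held by a built-in admin role, with a hypothetical task MY_DB.MY_SCHEMA.MY_TASK, is roughly: +# SHOW GRANTS ON TASK MY_DB.MY_SCHEMA.MY_TASK; +# REVOKE OPERATE ON TASK MY_DB.MY_SCHEMA.MY_TASK FROM ROLE ACCOUNTADMIN; +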
+@registry.register_check("snowflake.account") +def snowflake_stored_procs_not_owned_by_admins_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.7] Snowflake stored procedures should not be owned by ACCOUNTADMIN or SECURITYADMIN roles""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT * FROM SNOWFLAKE.ACCOUNT_USAGE.PROCEDURES WHERE DELETED IS NULL AND PROCEDURE_OWNER IN ('ACCOUNTADMIN','SECURITYADMIN') + """ + # as usual, as long as this returns an empty list, it's a pass + q = snowflakeCursor.execute(query).fetchall() + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if not q: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-stored-procs-owned-by-default-admin-roles-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + 
"UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.7] Snowflake stored procedures should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} does not have any stored procedures that are owned by ACCOUNTADMIN or SECURITYADMIN roles. This check does not take into account tasks running with custom roles using the ACCOUNTADMIN or SECURITYADMIN roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Stored Procedure ownership and general security best practices in Snowflake refer to the Understanding caller's rights and owner's rights stored procedures section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/developer-guide/stored-procedure/stored-procedures-rights" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.16", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-stored-procs-owned-by-default-admin-roles-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.7] Snowflake stored procedures should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} does not have any stored procedures that are owned by ACCOUNTADMIN or SECURITYADMIN roles. Snowflake executes stored procedures with the privileges of the stored procedure owner or the caller. Role that has OWNERSHIP privilege on the stored procedure owns it. 
To avoid granting a stored procedure inappropriate privileges, the OWNERSHIP privilege on a stored procedure run with owner's rights should be assigned to a custom role containing only those privileges that are necessary for successful execution of the stored procedure. If a threat actor finds a way to influence or hijack the stored procedure execution flow, they may be able to exploit privileges given to the stored procedure. In the case of the ACCOUNTADMIN or SECURITYADMIN roles, that may lead to a full account takeover. Additionally, a mistake in the stored procedure implementation coupled with excessive privileges may lead to a reliability incident, e.g. accidentally dropping database objects. This check does not take into account stored procedures owned by custom roles using the ACCOUNTADMIN or SECURITYADMIN roles. For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Stored Procedure ownership and general security best practices in Snowflake refer to the Understanding caller's rights and owner's rights stored procedures section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/developer-guide/stored-procedure/stored-procedures-rights" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 
4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.16", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_stored_procs_not_running_with_admin_privs_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.8] Snowflake stored procedures should not run with ACCOUNTADMIN or SECURITYADMIN role privileges""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT NAME AS STORED_PROCEDURE_NAME, GRANTED_TO, GRANTEE_NAME AS ROLE_NAME FROM SNOWFLAKE.ACCOUNT_USAGE.GRANTS_TO_ROLES WHERE GRANTED_ON = 'PROCEDURE' AND DELETED_ON IS NULL AND GRANTED_TO = 'ROLE' AND GRANTEE_NAME IN ('ACCOUNTADMIN' , 'SECURITYADMIN'); + """ + # as usual, as long as this returns an empty list, it's a pass + q = snowflakeCursor.execute(query).fetchall() + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if not q: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-stored-procs-run-with-admin-privileges-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.8] Snowflake stored procedures should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} does not have any stored procedures that run with ACCOUNTADMIN or SECURITYADMIN role privileges. 
This check does not take into account stored procedures running with custom roles using the ACCOUNTADMIN or SECURITYADMIN roles.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Stored Procedure ownership and general security best practices in Snowflake refer to the Understanding caller's rights and owner's rights stored procedures section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/developer-guide/stored-procedure/stored-procedures-rights" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.17", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-stored-procs-run-with-admin-privileges-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.8] Snowflake stored procedures should not run with ACCOUNTADMIN or SECURITYADMIN role privileges", + "Description": f"Snowflake account {snowflakeAccountId} has at least one stored procedure that runs with ACCOUNTADMIN or SECURITYADMIN role privileges. Snowflake stored procedures should not run with the ACCOUNTADMIN or SECURITYADMIN roles. Instead, stored procedures should be run using a custom role containing only those privileges that are necessary for successful execution of the stored procedure. If a threat actor finds a way to influence or hijack the stored procedure execution flow, they may be able to exploit privileges given to the stored procedure. In the case of the ACCOUNTADMIN or SECURITYADMIN roles, that may lead to a full account takeover. Additionally, a mistake in the stored procedure implementation coupled with excessive privileges may lead to a reliability incident, e.g. 
accidentally dropping database objects. This check does not take into account stored procedures running with custom roles using the ACCOUNTADMIN or SECURITYADMIN roles. For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing Snowflake Stored Procedure ownership and general security best practices in Snowflake refer to the Understanding caller's rights and owner's rights stored procedures section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/developer-guide/stored-procedure/stored-procedures-rights" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.17", + "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 2.2" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_account_password_policy_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.9] Snowflake Accounts should configure a password policy""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + payload = get_snowflake_password_policy(cache, snowflakeCursor) + + # B64 encode all of the details for the Asset + assetJson = json.dumps(payload,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if payload: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-password-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.9] Snowflake Accounts should configure a password policy", + "Description": f"Snowflake account {snowflakeAccountId} has at least one 
password policy configured.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for user management and password policies in Snowflake refer to the User management section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-user-management" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-password-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.9] Snowflake Accounts should configure a password policy", + "Description": f"Snowflake account {snowflakeAccountId} does not have a password policy configured. A password policy specifies the requirements that must be met to create and reset a password to authenticate to Snowflake. Beyond a strong password policy, Snowflake also supports multi-factor authentication (MFA) for additional security. A password policy should be configured to enforce strong password requirements, such as minimum length, complexity, and expiration. For more information on password policies in Snowflake refer to the User management section of the Snowflake Documentation Portal.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for user management and password policies in Snowflake refer to the User management section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-user-management" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding +
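+# A hedged illustrative sketch, not part of the check logic: creating and attaching an account-level password policy, with a hypothetical name MY_DB.MY_SCHEMA.ORG_PASSWORD_POLICY, is roughly: +# CREATE PASSWORD POLICY IF NOT EXISTS MY_DB.MY_SCHEMA.ORG_PASSWORD_POLICY PASSWORD_MIN_LENGTH = 14; +# ALTER ACCOUNT SET PASSWORD POLICY MY_DB.MY_SCHEMA.ORG_PASSWORD_POLICY; +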
+@registry.register_check("snowflake.account") +def snowflake_account_password_length_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.10] Snowflake password policies should enforce a minimum password length of at least 14 characters""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + for policy in get_snowflake_password_policy(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(policy,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + pwPolicyName = policy.get("NAME") + + # Evaluate min length only from "PASSWORD_MIN_LENGTH" key to set policy passing, treating a missing value as failing + pwPolicyPasses = False + pwPolicyMinLength = policy.get("PASSWORD_MIN_LENGTH") + if pwPolicyMinLength is not None and pwPolicyMinLength >= 14: + pwPolicyPasses = True + + # this is a passing check + if pwPolicyPasses is True: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{pwPolicyName}/snowflake-account-password-length-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.10] Snowflake password policies should enforce a minimum password length of at least 14 characters", + "Description": f"Snowflake password policy {pwPolicyName} requires at least 14 characters for the 
minimum password length.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for user management and password policies in Snowflake refer to the User management section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-user-management" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Password Policy", + "AssetComponent": "Policy" + }, + "Resources": [ + { + "Type": "SnowflakePasswordPolicy", + "Id": pwPolicyName, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.5" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{pwPolicyName}/snowflake-account-password-length-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.10] Snowflake password policies should enforce a minimum password length of at least 14 characters", + "Description": f"Snowflake password policy {pwPolicyName} does not require at least 14 characters for the minimum password length. Snowflake supports setting a password policy for your Snowflake account and for individual users. Only one password policy can be set at any given time for your Snowflake account or a user. If a password policy exists for the Snowflake account and another password policy is set for a user in the same Snowflake account, the user-level password policy takes precedence over the account-level password policy. While Snowflake recommends configuring SSO authentication for users and ensuring that SSO users do not have a password set, there may be exceptions when users still need to log in with a password (e.g., setting up a break-glass user with password login to recover from SSO outages). For those few users that still need to have a password, setting a password policy can help ensure that, throughout subsequent password changes, the passwords used remain complex and therefore harder to guess or brute-force. 
For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for user management and password policies in Snowflake refer to the User management section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/admin-user-management" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Password Policy", + "AssetComponent": "Policy" + }, + "Resources": [ + { + "Type": "SnowflakePasswordPolicy", + "Id": pwPolicyName, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "CIS Snowflake Foundations Benchmark V1.0.0 1.5" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.account") +def snowflake_monitor_session_keep_alive_commands_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Account.11] Snowflake Accounts should be monitored for users extending their sessions""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + query = """ + SELECT DISTINCT session_id FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY + WHERE query_type = 'ALTER_SESSION' + AND query_text ilike '%CLIENT_SESSION_KEEP_ALIVE%TRUE%' + AND query_text not ilike '%CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY%' + """ + + # execute the CIS query, works pretty well actually...this SHOULDN'T return anything for it to pass + q = snowflakeCursor.execute(query).fetchall() + + # B64 encode all of the details for the Asset + assetJson = json.dumps(q,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if not q: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-session-timeout-keep-alive-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Account.11] Snowflake Accounts should be monitored for users extending their sessions", + "Description": f"Snowflake account 
{snowflakeAccountId} does not have any users extending their sessions.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices on sessions in Snowflake refer to the Snowflake Sessions & Session Policies section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/session-policies#considerations" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.BE-5", + "NIST CSF V1.1 PR.DS-4", + "NIST CSF V1.1 PR.PT-5", + "NIST SP 800-53 Rev. 4 AU-4", + "NIST SP 800-53 Rev. 4 CP-2", + "NIST SP 800-53 Rev. 4 CP-7", + "NIST SP 800-53 Rev. 4 CP-8", + "NIST SP 800-53 Rev. 4 CP-11", + "NIST SP 800-53 Rev. 4 CP-13", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SA-14", + "NIST SP 800-53 Rev. 4 SC-5", + "NIST SP 800-53 Rev. 4 SC-6", + "AICPA TSC CC3.1", + "AICPA TSC A1.1", + "AICPA TSC A1.2", + "ISO 27001:2013 A.11.1.4", + "ISO 27001:2013 A.12.3.1", + "ISO 27001:2013 A.17.1.1", + "ISO 27001:2013 A.17.1.2", + "ISO 27001:2013 A.17.2.1" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-session-timeout-keep-alive-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.11] Snowflake Accounts should be monitored for users extending their sessions", + "Description": f"Snowflake account {snowflakeAccountId} has at least one user extending their session. If a client supports the CLIENT_SESSION_KEEP_ALIVE option and the option is set to TRUE, the client preserves the Snowflake session indefinitely as long as the connection to Snowflake is active. Otherwise, if the option is set to FALSE, the session ends after 4 hours. When possible, avoid using this option since it can result in many open sessions and place a greater demand on resources which can lead to a performance degradation. In rarer cases, this can become a security risk if a session is hijacked due to a further downstream vulnerability. 
For more information refer to the remediation section.",
+                "Remediation": {
+                    "Recommendation": {
+                        "Text": "For information on best practices on sessions in Snowflake refer to the Snowflake Sessions & Session Policies section of the Snowflake Documentation Portal.",
+                        "Url": "https://docs.snowflake.com/en/user-guide/session-policies#considerations"
+                    }
+                },
+                "ProductFields": {
+                    "ProductName": "ElectricEye",
+                    "Provider": "Snowflake",
+                    "ProviderType": "SaaS",
+                    "ProviderAccountId": snowflakeAccountId,
+                    "AssetRegion": snowflakeRegion,
+                    "AssetDetails": assetB64,
+                    "AssetClass": "Management & Governance",
+                    "AssetService": "Snowflake Account",
+                    "AssetComponent": "Account"
+                },
+                "Resources": [
+                    {
+                        "Type": "SnowflakeAccount",
+                        "Id": snowflakeAccountId,
+                        "Partition": awsPartition,
+                        "Region": awsRegion
+                    }
+                ],
+                "Compliance": {
+                    "Status": "FAILED",
+                    "RelatedRequirements": [
+                        "NIST CSF V1.1 ID.BE-5",
+                        "NIST CSF V1.1 PR.DS-4",
+                        "NIST CSF V1.1 PR.PT-5",
+                        "NIST SP 800-53 Rev. 4 AU-4",
+                        "NIST SP 800-53 Rev. 4 CP-2",
+                        "NIST SP 800-53 Rev. 4 CP-7",
+                        "NIST SP 800-53 Rev. 4 CP-8",
+                        "NIST SP 800-53 Rev. 4 CP-11",
+                        "NIST SP 800-53 Rev. 4 CP-13",
+                        "NIST SP 800-53 Rev. 4 PL-8",
+                        "NIST SP 800-53 Rev. 4 SA-14",
+                        "NIST SP 800-53 Rev. 4 SC-5",
+                        "NIST SP 800-53 Rev. 4 SC-6",
+                        "AICPA TSC CC3.1",
+                        "AICPA TSC A1.1",
+                        "AICPA TSC A1.2",
+                        "ISO 27001:2013 A.11.1.4",
+                        "ISO 27001:2013 A.12.3.1",
+                        "ISO 27001:2013 A.17.1.1",
+                        "ISO 27001:2013 A.17.1.2",
+                        "ISO 27001:2013 A.17.2.1"
+                    ]
+                },
+                "Workflow": {"Status": "NEW"},
+                "RecordState": "ACTIVE"
+            }
+            yield finding
+
+@registry.register_check("snowflake.account")
+def snowflake_network_policy_check(
+    cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str]
+) -> dict:
+    """[Snowflake.Account.12] Snowflake Accounts should have a network policy enabled"""
+    # ISO Time
+    iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat()
+
+    query = "SELECT * FROM SNOWFLAKE.ACCOUNT_USAGE.NETWORK_POLICIES"
+
+    # if this is empty it is a failing check
+    q = snowflakeCursor.execute(query).fetchall()
+
+    # B64 encode all of the details for the Asset
+    assetJson = json.dumps(q,default=str).encode("utf-8")
+    assetB64 = base64.b64encode(assetJson)
+
+    # this is a passing check - at least one network policy exists
+    if q:
+        finding = {
+            "SchemaVersion": "2018-10-08",
+            "Id": f"{snowflakeAccountId}/snowflake-account-network-policy-check",
+            "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
+            "GeneratorId": snowflakeAccountId,
+            "AwsAccountId": awsAccountId,
+            "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
+            "FirstObservedAt": iso8601Time,
+            "CreatedAt": iso8601Time,
+            "UpdatedAt": iso8601Time,
+            "Severity": {"Label": "INFORMATIONAL"},
+            "Confidence": 99,
+            "Title": "[Snowflake.Account.12] Snowflake Accounts should have a network policy enabled",
+            "Description": f"Snowflake account {snowflakeAccountId} has at least one network policy. 
This check does not evaluate the actual contents of the network policy, only that one exists.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for network security and creating Network Policies in Snowflake refer to the Controlling network traffic with network policies section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/network-policies" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-3", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-17", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-20", + "NIST SP 800-53 Rev. 4 SC-15", + "AICPA TSC CC6.6", + "ISO 27001:2013 A.6.2.1", + "ISO 27001:2013 A.6.2.2", + "ISO 27001:2013 A.11.2.6", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 3.1" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-network-policy-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Account.12] Snowflake Accounts should have a network policy enabled", + "Description": f"Snowflake account {snowflakeAccountId} does not have a network policy. This check does not evaluate the actual contents of the network policy, only that one exists. Snowflake network policies are used to control network traffic to and from your Snowflake account. Network policies are defined using a set of rules that specify the conditions under which network traffic is allowed or denied. For more information refer to the remediation section.", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for network security and creating Network Policies in Snowflake refer to the Controlling network traffic with network policies section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/network-policies" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-3", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 
4 AC-17", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-20", + "NIST SP 800-53 Rev. 4 SC-15", + "AICPA TSC CC6.6", + "ISO 27001:2013 A.6.2.1", + "ISO 27001:2013 A.6.2.2", + "ISO 27001:2013 A.11.2.6", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 3.1" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +# EOF \ No newline at end of file diff --git a/eeauditor/auditors/snowflake/Snowflake_Users_Auditor.py b/eeauditor/auditors/snowflake/Snowflake_Users_Auditor.py new file mode 100644 index 00000000..f6c8023d --- /dev/null +++ b/eeauditor/auditors/snowflake/Snowflake_Users_Auditor.py @@ -0,0 +1,1757 @@ +#This file is part of ElectricEye. +#SPDX-License-Identifier: Apache-2.0 + +#Licensed to the Apache Software Foundation (ASF) under one +#or more contributor license agreements. See the NOTICE file +#distributed with this work for additional information +#regarding copyright ownership. The ASF licenses this file +#to you under the Apache License, Version 2.0 (the +#"License"); you may not use this file except in compliance +#with the License. You may obtain a copy of the License at + +#http://www.apache.org/licenses/LICENSE-2.0 + +#Unless required by applicable law or agreed to in writing, +#software distributed under the License is distributed on an +#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +#KIND, either express or implied. See the License for the +#specific language governing permissions and limitations +#under the License. + +import logging +from datetime import datetime, timezone, timedelta, UTC +from snowflake.connector import cursor +import snowflake.connector.errors as snowerrors +from check_register import CheckRegister +import base64 +import json + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("SnowflakeUserAuditor") + +registry = CheckRegister() + +def timestamp_to_iso(timestampNtz: str | None) -> str | None: + """ + Receives from Snowflake and transforms to ISO 8601 format stringified datetime objects. If the timestamp is None, it returns None. 
+ """ + if timestampNtz is None: + return None + + try: + dt = datetime.strptime(str(timestampNtz), '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + dt = datetime.strptime(str(timestampNtz), '%Y-%m-%d %H:%M:%S') + + dt = dt.replace(tzinfo=timezone.utc).isoformat() + + return str(dt) + +def get_roles_for_user(username: str, snowflakeCursor: cursor.SnowflakeCursor) -> tuple[list[str | None], bool]: + """ + Retrieves the assigned grants (Roles) for a given user + """ + + query = f""" + SHOW GRANTS TO USER "{username}" + """ + + adminRoles = ["ACCOUNTADMIN","ORGADMIN","SECURITYADMIN","SYSADMIN"] + roles = [] + + try: + q = snowflakeCursor.execute(query) + for row in q.fetchall(): + roles.append(row["role"]) + except TypeError: + logger.warn(f"no roles for the user: {username}") + except snowerrors.ProgrammingError as spe: + if "does not exist" in str(spe): + logger.warning("Snowflake User %s is inactive or roles are unable to be retrieved.", username) + except Exception as e: + logger.warning("Exception encounterd while trying to get roles for user %s: %s", username, e) + return (list(), None) + + if roles: + if any(adminrole in roles for adminrole in adminRoles): + isAdmin = True + else: + isAdmin = False + else: + isAdmin = False + + return roles, isAdmin + +def check_user_logon_without_mfa(username: str, snowflakeCursor: cursor.SnowflakeCursor) -> tuple[bool, int]: + """Pulls distinct logs for a user where they did not use MFA, returns True if they did not use MFA along with the amount of times""" + + # Check for specific users that used Password, didn't fail, and didn't use a 2FA factor + query = f""" + SELECT DISTINCT + USER_NAME, + IS_SUCCESS + FIRST_AUTHENTICATION_FACTOR, + SECOND_AUTHENTICATION_FACTOR + FROM SNOWFLAKE.ACCOUNT_USAGE.LOGIN_HISTORY + WHERE USER_NAME = '{username}' + AND IS_SUCCESS = 'YES' + AND FIRST_AUTHENTICATION_FACTOR = 'PASSWORD' + AND SECOND_AUTHENTICATION_FACTOR IS NULL + """ + + try: + q = snowflakeCursor.execute(query).fetchall() + except Exception as e: + logger.warning("Exception encountered while trying to get logon history for Snowflake user %s: %s", username, e) + return (False, 0) + + if q: + loginWithoutMfa = True + logonsWithoutMfaCount = len(q) + else: + loginWithoutMfa = False + logonsWithoutMfaCount = 0 + + return (loginWithoutMfa, logonsWithoutMfaCount) + +def get_snowflake_users(cache: dict, snowflakeCursor: cursor.SnowflakeCursor) -> dict: + """ + Gathers a list of users from the SNOWFLAKE.ACCOUNT_USAGE.USERS table, enriches the data with Snowflake Roles and Snowflake Logon data, and returns a list of dictionaries containing user data. This is written into the ElectricEye cache. 
+ """ + response = cache.get("get_snowflake_users") + if response: + return response + + snowflakeUsers = [] + + # Use the almighty SQL query to get all the users + query = f""" + SELECT DISTINCT + user_id, + name, + to_timestamp_ntz(created_on) as created_on, + to_timestamp_ntz(deleted_on) as deleted_on, + login_name, + display_name, + first_name, + last_name, + email, + must_change_password, + has_password, + comment, + disabled, + snowflake_lock, + default_warehouse, + default_namespace, + default_role, + ext_authn_duo, + ext_authn_uid, + bypass_mfa_until, + to_timestamp_ntz(last_success_login) as last_success_login, + to_timestamp_ntz(expires_at) as expires_at, + to_timestamp_ntz(locked_until_time) as locked_until_time, + has_rsa_public_key, + to_timestamp_ntz(password_last_set_time) as password_last_set_time, + owner, + default_secondary_role + FROM SNOWFLAKE.ACCOUNT_USAGE.USERS + """ + + try: + q = snowflakeCursor.execute(query) + for column in q.fetchall(): + username = column["NAME"] + try: + pwLastSetTime = str(column["PASSWORD_LAST_SET_TIME"]) + except KeyError: + pwLastSetTime = None + + roleData = get_roles_for_user(username, snowflakeCursor) + + logins = check_user_logon_without_mfa(username, snowflakeCursor) + + snowflakeUsers.append( + { + "user_id": column["USER_ID"], + "name": username, + "created_on": timestamp_to_iso(column["CREATED_ON"]), + "deleted_on": timestamp_to_iso(column["DELETED_ON"]), + "login_name": column["LOGIN_NAME"], + "display_name": column["DISPLAY_NAME"], + "first_name": column["FIRST_NAME"], + "last_name": column["LAST_NAME"], + "email": column["EMAIL"], + "assigned_roles": roleData[0], + "is_admin": roleData[1], + "logged_on_without_mfa": logins[0], + "total_logons_without_mfa": logins[1], + "must_change_password": column["MUST_CHANGE_PASSWORD"], + "has_password": column["HAS_PASSWORD"], + "comment": column["COMMENT"], + "disabled": column["DISABLED"], + "snowflake_lock": column["SNOWFLAKE_LOCK"], + "default_warehouse": column["DEFAULT_WAREHOUSE"], + "default_namespace": column["DEFAULT_NAMESPACE"], + "default_role": column["DEFAULT_ROLE"], + "ext_authn_duo": column["EXT_AUTHN_DUO"], + "ext_authn_uid": column["EXT_AUTHN_UID"], + "bypass_mfa_until": timestamp_to_iso(column["BYPASS_MFA_UNTIL"]), + "last_success_login": timestamp_to_iso(column["LAST_SUCCESS_LOGIN"]), + "expires_at": timestamp_to_iso(column["EXPIRES_AT"]), + "locked_until_time": timestamp_to_iso(column["LOCKED_UNTIL_TIME"]), + "has_rsa_public_key": column["HAS_RSA_PUBLIC_KEY"], + "password_last_set_time": timestamp_to_iso(pwLastSetTime), + "owner": column["OWNER"], + "default_secondary_role": column["DEFAULT_SECONDARY_ROLE"] + } + ) + except Exception as e: + logger.warning("Exception encountered while trying to get Snowflake users: %s", e) + + cache["get_snowflake_users"] = snowflakeUsers + + return cache["get_snowflake_users"] + +@registry.register_check("snowflake.users") +def snowflake_password_assigned_user_has_mfa_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.1] Snowflake users with passwords should have MFA enabled""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + 
assetB64 = base64.b64encode(assetJson) + username = user["name"] + # this is a passing check + if user["ext_authn_duo"] is True and user["has_password"] is True and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/password-user-mfa-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.1] Snowflake users with passwords should have MFA enabled", + "Description": f"Snowflake user {username} has a password assigned and has MFA enabled.", + "Remediation": { + "Recommendation": { + "Text": "For information on MFA best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 
4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 1.4" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if user["ext_authn_duo"] is False and user["has_password"] is True and user["deleted_on"] is None and username not in serviceAccountExemptions: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/password-user-mfa-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[Snowflake.Users.1] Snowflake users with passwords should have MFA enabled", + "Description": f"Snowflake user {username} has a password assigned but does not have MFA enabled. Multi-factor authentication (MFA) is a security control used to add an additional layer of login security. It works by requiring the user to present two or more proofs (factors) of user identity. An MFA example would be requiring a password and a verification code delivered to the user's phone during user sign-in. The MFA feature for Snowflake users is powered by the Duo Security service. This check does not account for SCIM or IdP-managed users with external MFA devices assigned, that criteria should be manually verified. Refer to the remediation section if this behavior is not intended.", + "Remediation": { + "Recommendation": { + "Text": "For information on MFA best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 
4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 1.4" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_service_account_user_uses_keypair_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.2] Snowflake 'service account' users should use RSA key pairs for authentication""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + # this is a passing check + if user["has_rsa_public_key"] is True and user["has_password"] is False and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/service-account-user-rsa-keypair-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.2] Snowflake 'service account' users should use RSA key pairs for authentication", + "Description": f"Snowflake 'service account' user {username} uses an RSA key pair for authentication. On the platform level Snowflake does not differentiate between Snowflake users created for and used by humans and Snowflake users created for and used by services. This check assumes that users without a password enabled are service accounts.", + "Remediation": { + "Recommendation": { + "Text": "For information on RSA keypair best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 
4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 1.6" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if user["has_rsa_public_key"] is False and user["has_password"] is False and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/service-account-user-rsa-keypair-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "MEDIUM"}, + "Confidence": 99, + "Title": "[Snowflake.Users.2] Snowflake 'service account' users should use RSA key pairs for authentication", + "Description": f"Snowflake 'service account' user {username} does not use an RSA key pair for authentication. On the platform level Snowflake does not differentiate between Snowflake users created for and used by humans and Snowflake users created for and used by services. This check assumes that users without a password enabled are service accounts. Password-based authentication used by humans can be augmented by a second factor (MFA), e.g. a hardware token, or a security code pushed to a mobile device. Services and automation cannot be easily configured to authenticate with a second factor. Instead, for such use cases, Snowflake supports using key pair authentication as a more secure alternative to password-based authentication. Note that password-based authentication for a service account can be enabled along with a key-based authentication. To ensure that only key-based authentication is enabled for a service account, the PASSWORD parameter for that Snowflake user must be set to null. 
+                "Remediation": {
+                    "Recommendation": {
+                        "Text": "For information on RSA keypair best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.",
+                        "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q"
+                    }
+                },
+                "ProductFields": {
+                    "ProductName": "ElectricEye",
+                    "Provider": "Snowflake",
+                    "ProviderType": "SaaS",
+                    "ProviderAccountId": snowflakeAccountId,
+                    "AssetRegion": snowflakeRegion,
+                    "AssetDetails": assetB64,
+                    "AssetClass": "Identity & Access Management",
+                    "AssetService": "Snowflake Users",
+                    "AssetComponent": "User"
+                },
+                "Resources": [
+                    {
+                        "Type": "SnowflakeUser",
+                        "Id": username,
+                        "Partition": awsPartition,
+                        "Region": awsRegion
+                    }
+                ],
+                "Compliance": {
+                    "Status": "FAILED",
+                    "RelatedRequirements": [
+                        "NIST CSF V1.1 PR.AC-1",
+                        "NIST SP 800-53 Rev. 4 AC-1",
+                        "NIST SP 800-53 Rev. 4 AC-2",
+                        "NIST SP 800-53 Rev. 4 IA-1",
+                        "NIST SP 800-53 Rev. 4 IA-2",
+                        "NIST SP 800-53 Rev. 4 IA-3",
+                        "NIST SP 800-53 Rev. 4 IA-4",
+                        "NIST SP 800-53 Rev. 4 IA-5",
+                        "NIST SP 800-53 Rev. 4 IA-6",
+                        "NIST SP 800-53 Rev. 4 IA-7",
+                        "NIST SP 800-53 Rev. 4 IA-8",
+                        "NIST SP 800-53 Rev. 4 IA-9",
+                        "NIST SP 800-53 Rev. 4 IA-10",
+                        "NIST SP 800-53 Rev. 4 IA-11",
+                        "AICPA TSC CC6.1",
+                        "AICPA TSC CC6.2",
+                        "ISO 27001:2013 A.9.2.1",
+                        "ISO 27001:2013 A.9.2.2",
+                        "ISO 27001:2013 A.9.2.3",
+                        "ISO 27001:2013 A.9.2.4",
+                        "ISO 27001:2013 A.9.2.6",
+                        "ISO 27001:2013 A.9.3.1",
+                        "ISO 27001:2013 A.9.4.2",
+                        "ISO 27001:2013 A.9.4.3",
+                        "MITRE ATT&CK T1589",
+                        "MITRE ATT&CK T1586",
+                        "CIS Snowflake Foundations Benchmark V1.0.0 1.6"
+                    ]
+                },
+                "Workflow": {"Status": "NEW"},
+                "RecordState": "ACTIVE"
+            }
+            yield finding
+
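Since [Snowflake.Users.2] keys off has_rsa_public_key, a minimal sketch of the remediation it implies follows; it is not part of the patch, and the service user name and the truncated key material are hypothetical placeholders.

# Hedged sketch only, not part of this PR: attach an RSA public key to a
# hypothetical service user, then null out its password so key pair
# authentication is the only factor left
def convert_to_keypair_auth(snowflakeCursor):
    snowflakeCursor.execute("ALTER USER svc_etl_user SET RSA_PUBLIC_KEY = 'MIIBIjANBgkqh...'")
    snowflakeCursor.execute("ALTER USER svc_etl_user UNSET PASSWORD")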
"AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "Snowflake users that have not logged in within the last 90 days should be disabled", + "Description": f"Snowflake user {username} is either disabled, deleted, or has logged in within the last 90 days.", + "Remediation": { + "Recommendation": { + "Text": "For information on user management best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.AM-3", + "NIST CSF V1.1 DE.AE-1", + "NIST CSF V1.1 DE.AE-3", + "NIST CSF V1.1 DE.CM-1", + "NIST CSF V1.1 DE.CM-7", + "NIST CSF V1.1 PR.PT-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-4", + "NIST SP 800-53 Rev. 4 AU-6", + "NIST SP 800-53 Rev. 4 AU-12", + "NIST SP 800-53 Rev. 4 CA-3", + "NIST SP 800-53 Rev. 4 CA-7", + "NIST SP 800-53 Rev. 4 CA-9", + "NIST SP 800-53 Rev. 4 CM-2", + "NIST SP 800-53 Rev. 4 CM-3", + "NIST SP 800-53 Rev. 4 CM-8", + "NIST SP 800-53 Rev. 4 IR-4", + "NIST SP 800-53 Rev. 4 IR-5", + "NIST SP 800-53 Rev. 4 IR-8", + "NIST SP 800-53 Rev. 4 PE-3", + "NIST SP 800-53 Rev. 4 PE-6", + "NIST SP 800-53 Rev. 4 PE-20", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SC-5", + "NIST SP 800-53 Rev. 4 SC-7", + "NIST SP 800-53 Rev. 
4 SI-4", + "AICPA TSC CC3.2", + "AICPA TSC CC6.1", + "AICPA TSC CC7.2", + "ISO 27001:2013 A.12.1.1", + "ISO 27001:2013 A.12.1.2", + "ISO 27001:2013 A.12.4.1", + "ISO 27001:2013 A.12.4.2", + "ISO 27001:2013 A.12.4.3", + "ISO 27001:2013 A.12.4.4", + "ISO 27001:2013 A.12.7.1", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "ISO 27001:2013 A.13.2.2", + "ISO 27001:2013 A.14.2.7", + "ISO 27001:2013 A.15.2.1", + "ISO 27001:2013 A.16.1.7", + "CIS Snowflake Foundations Benchmark V1.0.0 1.8", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/disable-user-without-login-in-last-90-days-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "Snowflake users that have not logged in within the last 90 days should be disabled", + "Description": f"Snowflake user {username} has not logged in within the last 90 days and should be considered for disablement. Access grants tend to accumulate over time unless explicitly set to expire. Regularly revoking unused access grants and disabling inactive user accounts is a good countermeasure to this dynamic. If credentials of an inactive user account are leaked or stolen, it may take longer to discover the compromise. In Snowflake an user account can be disabled by users with the ACCOUNTADMIN role. Disabling inactive user accounts supports the principle of least privilege and generally reduces attack surface. For more information on user management best practices refer to the Snowflake documentation.", + "Remediation": { + "Recommendation": { + "Text": "For information on user management best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 ID.AM-3", + "NIST CSF V1.1 DE.AE-1", + "NIST CSF V1.1 DE.AE-3", + "NIST CSF V1.1 DE.CM-1", + "NIST CSF V1.1 DE.CM-7", + "NIST CSF V1.1 PR.PT-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-4", + "NIST SP 800-53 Rev. 4 AU-6", + "NIST SP 800-53 Rev. 4 AU-12", + "NIST SP 800-53 Rev. 4 CA-3", + "NIST SP 800-53 Rev. 4 CA-7", + "NIST SP 800-53 Rev. 4 CA-9", + "NIST SP 800-53 Rev. 4 CM-2", + "NIST SP 800-53 Rev. 4 CM-3", + "NIST SP 800-53 Rev. 
4 CM-8", + "NIST SP 800-53 Rev. 4 IR-4", + "NIST SP 800-53 Rev. 4 IR-5", + "NIST SP 800-53 Rev. 4 IR-8", + "NIST SP 800-53 Rev. 4 PE-3", + "NIST SP 800-53 Rev. 4 PE-6", + "NIST SP 800-53 Rev. 4 PE-20", + "NIST SP 800-53 Rev. 4 PL-8", + "NIST SP 800-53 Rev. 4 SC-5", + "NIST SP 800-53 Rev. 4 SC-7", + "NIST SP 800-53 Rev. 4 SI-4", + "AICPA TSC CC3.2", + "AICPA TSC CC6.1", + "AICPA TSC CC7.2", + "ISO 27001:2013 A.12.1.1", + "ISO 27001:2013 A.12.1.2", + "ISO 27001:2013 A.12.4.1", + "ISO 27001:2013 A.12.4.2", + "ISO 27001:2013 A.12.4.3", + "ISO 27001:2013 A.12.4.4", + "ISO 27001:2013 A.12.7.1", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "ISO 27001:2013 A.13.2.2", + "ISO 27001:2013 A.14.2.7", + "ISO 27001:2013 A.15.2.1", + "ISO 27001:2013 A.16.1.7", + "CIS Snowflake Foundations Benchmark V1.0.0 1.8", + "CIS Snowflake Foundations Benchmark V1.0.0 2.3" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_accountadmins_have_email_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.4] Snowflake users assigned the ACCOUNTADMIN role should have an email address assigned""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + # pre-check email, the shit can be properly null or stupid sauce fr fr + hasEmail = True + if user["email"] is None or user["email"] == "": + hasEmail = False + # this is a passing check + if "ACCOUNTADMIN" in user["assigned_roles"] and hasEmail is True and user["has_password"] is True and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/accountadmin-role-users-have-email-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.4] Snowflake users assigned the ACCOUNTADMIN role should have an email address assigned", + "Description": f"Snowflake user {username} has the ACCOUNTADMIN role assigned and has an email addressed assigned as well. 
+        # this is a failing check
+        if "ACCOUNTADMIN" in user["assigned_roles"] and hasEmail is False and user["has_password"] is True and user["deleted_on"] is None and username not in serviceAccountExemptions:
+            finding = {
+                "SchemaVersion": "2018-10-08",
+                "Id": f"{snowflakeAccountId}/{username}/accountadmin-role-users-have-email-check",
+                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
+                "GeneratorId": f"{snowflakeAccountId}/{username}",
+                "AwsAccountId": awsAccountId,
+                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
+                "FirstObservedAt": iso8601Time,
+                "CreatedAt": iso8601Time,
+                "UpdatedAt": iso8601Time,
+                "Severity": {"Label": "LOW"},
+                "Confidence": 99,
+                "Title": "[Snowflake.Users.4] Snowflake users assigned the ACCOUNTADMIN role should have an email address assigned",
+                "Description": f"Snowflake user {username} has the ACCOUNTADMIN role assigned and does not have an email address assigned. Every Snowflake user can be assigned an email address. The email addresses are then used by Snowflake features like notification integration, resource monitor and support cases to deliver email notifications to Snowflake users. In trial Snowflake accounts these email addresses are used for password reset functionality. The email addresses assigned to ACCOUNTADMIN users are used by Snowflake to notify administrators about important events related to their accounts. For example, ACCOUNTADMIN users are notified about impending expiration of SAML2 certificates or SCIM access tokens. If users with the ACCOUNTADMIN role are not assigned working email addresses that are being monitored and if the SAML2 certificate used in SSO integration is not proactively renewed, expiration of the SAML2 certificate may break the SSO authentication flow. Similarly, uncaught expiration of a SCIM access token may break the SCIM integration. This only checks for the presence of an email for users that also have a password, since 'service accounts' do not have passwords and do not need an email address. For more information on user management best practices refer to the Snowflake documentation.",
+                "Remediation": {
+                    "Recommendation": {
+                        "Text": "For information on assigning emails and the rationale for ACCOUNTADMINS to have emails refer to the Access control considerations section of the Snowflake Documentation Portal.",
+                        "Url": "https://docs.snowflake.com/en/user-guide/security-access-control-considerations"
+                    }
+                },
+                "ProductFields": {
+                    "ProductName": "ElectricEye",
+                    "Provider": "Snowflake",
+                    "ProviderType": "SaaS",
+                    "ProviderAccountId": snowflakeAccountId,
+                    "AssetRegion": snowflakeRegion,
+                    "AssetDetails": assetB64,
+                    "AssetClass": "Identity & Access Management",
+                    "AssetService": "Snowflake Users",
+                    "AssetComponent": "User"
+                },
+                "Resources": [
+                    {
+                        "Type": "SnowflakeUser",
+                        "Id": username,
+                        "Partition": awsPartition,
+                        "Region": awsRegion
+                    }
+                ],
+                "Compliance": {
+                    "Status": "FAILED",
+                    "RelatedRequirements": [
+                        "NIST CSF V1.1 PR.AC-1",
+                        "NIST SP 800-53 Rev. 4 AC-1",
+                        "NIST SP 800-53 Rev. 4 AC-2",
+                        "NIST SP 800-53 Rev. 4 IA-1",
+                        "NIST SP 800-53 Rev. 4 IA-2",
+                        "NIST SP 800-53 Rev. 4 IA-3",
+                        "NIST SP 800-53 Rev. 4 IA-4",
+                        "NIST SP 800-53 Rev. 4 IA-5",
+                        "NIST SP 800-53 Rev. 4 IA-6",
+                        "NIST SP 800-53 Rev. 4 IA-7",
+                        "NIST SP 800-53 Rev. 4 IA-8",
+                        "NIST SP 800-53 Rev. 4 IA-9",
+                        "NIST SP 800-53 Rev. 4 IA-10",
+                        "NIST SP 800-53 Rev. 4 IA-11",
+                        "AICPA TSC CC6.1",
+                        "AICPA TSC CC6.2",
+                        "ISO 27001:2013 A.9.2.1",
+                        "ISO 27001:2013 A.9.2.2",
+                        "ISO 27001:2013 A.9.2.3",
+                        "ISO 27001:2013 A.9.2.4",
+                        "ISO 27001:2013 A.9.2.6",
+                        "ISO 27001:2013 A.9.3.1",
+                        "ISO 27001:2013 A.9.4.2",
+                        "ISO 27001:2013 A.9.4.3",
+                        "MITRE ATT&CK T1589",
+                        "MITRE ATT&CK T1586",
+                        "CIS Snowflake Foundations Benchmark V1.0.0 1.11"
+                    ]
+                },
+                "Workflow": {"Status": "NEW"},
+                "RecordState": "ACTIVE"
+            }
+            yield finding
+
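For [Snowflake.Users.4], assigning a monitored mailbox to an administrator is a single ALTER USER statement; a sketch with hypothetical names, not part of the patch:

# Hedged sketch only, not part of this PR: give a hypothetical ACCOUNTADMIN
# user a monitored email address (ALTER USER ... SET EMAIL is standard Snowflake SQL)
snowflakeCursor.execute("ALTER USER admin_jane SET EMAIL = 'jane.doe@example.com'")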
4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 1.11" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_admin_default_role_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.5] Snowflake users should not be assigned the ACCOUNTADMIN or SECURITYADMIN role as the default role""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + # this is a passing check + if user["default_role"] not in ["ACCOUNTADMIN","SECURITYADMIN"] or user["default_role"] is None and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-admin-default-role-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.5] Snowflake users should not be assigned the ACCOUNTADMIN or SECURITYADMIN role as the default role", + "Description": f"Snowflake user {username} does has not have the ACCOUNTADMIN nor the SECURITYADMIN role as their default role.", + "Remediation": { + "Recommendation": { + "Text": "For information on assinging default roles and the rationale for not assigning ACCOUNTADMIN or SECURITYADMIN as the default rolerefer to the Avoid using the ACCOUNTADMIN role to create objects section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-access-control-considerations#avoid-using-the-accountadmin-role-to-create-objects" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-3", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-17", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-20", + "NIST SP 800-53 Rev. 
4 SC-15", + "AICPA TSC CC6.6", + "ISO 27001:2013 A.6.2.1", + "ISO 27001:2013 A.6.2.2", + "ISO 27001:2013 A.11.2.6", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.12" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if user["default_role"] in ["ACCOUNTADMIN","SECURITYADMIN"] and user["deleted_on"] is None and username not in serviceAccountExemptions: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-admin-default-role-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "HIGH"}, + "Confidence": 99, + "Title": "[Snowflake.Users.5] Snowflake users should not be assigned the ACCOUNTADMIN or SECURITYADMIN role as the default role", + "Description": f"Snowflake user {username} has either the ACCOUNTADMIN or SECURITYADMIN role as their default role. The ACCOUNTADMIN system role is the most powerful role in a Snowflake account and is intended for performing initial setup and managing account-level objects. SECURITYADMIN role can trivially escalate their privileges to that of ACCOUNTADMIN. Neither of these roles should be used for performing daily non-administrative tasks in a Snowflake account. Instead, users should be assigned custom roles containing only those privileges that are necessary for successfully completing their job responsibilities. When ACCOUNTADMIN is not set as a default user role, it forces account administrators to explicitly change their role to ACCOUNTADMIN each time they log in. This can help make account administrators aware of the purpose of roles in the system, prevent them from inadvertently using the ACCOUNTADMIN role for non-administrative tasks, and encourage them to change to the appropriate role for a given task. Same logic applies to the SECURITYADMIN role. For more information on user management best practices refer to the Snowflake documentation.", + "Remediation": { + "Recommendation": { + "Text": "For information on assinging default roles and the rationale for not assigning ACCOUNTADMIN or SECURITYADMIN as the default rolerefer to the Avoid using the ACCOUNTADMIN role to create objects section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-access-control-considerations#avoid-using-the-accountadmin-role-to-create-objects" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-3", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-17", + "NIST SP 800-53 Rev. 4 AC-19", + "NIST SP 800-53 Rev. 4 AC-20", + "NIST SP 800-53 Rev. 
4 SC-15", + "AICPA TSC CC6.6", + "ISO 27001:2013 A.6.2.1", + "ISO 27001:2013 A.6.2.2", + "ISO 27001:2013 A.11.2.6", + "ISO 27001:2013 A.13.1.1", + "ISO 27001:2013 A.13.2.1", + "CIS Snowflake Foundations Benchmark V1.0.0 1.12" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_logins_without_mfa_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.6] Snowflake users should be monitored for logins without MFA""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + + # Hey, we prepoulate the MFA status in the user object so we can just check it here + loggedInWithoutMfa = user["logged_on_without_mfa"] + timesLoggedInWithoutMfa = user["total_logons_without_mfa"] + + # this is a passing check + if loggedInWithoutMfa is False and user["has_password"] is True and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-logins-without-mfa-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.6] Snowflake users should be monitored for logins without MFA", + "Description": f"Snowflake user {username} has not logged in without MFA. This check does not take into account if users have *never* logged in nor does it take into account if users have MFA enabled. This check relies on data stored in the LOGON_HISTORY view and may not be up-to-date.", + "Remediation": { + "Recommendation": { + "Text": "For information on MFA best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 
4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 2.4" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if loggedInWithoutMfa is True and user["has_password"] is True and user["deleted_on"] is None and username not in serviceAccountExemptions: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-logins-without-mfa-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Users.6] Snowflake users should be monitored for logins without MFA", + "Description": f"Snowflake user {username} has logged in without MFA {timesLoggedInWithoutMfa} times. This check relies on data stored in the LOGON_HISTORY view and includes at least a year of logins, hence the lower severity level. Multi-factor authentication (MFA) is a security control used to add an additional layer of login security. It works by requiring the user to present two or more proofs (factors) of user identity. An MFA example would be requiring a password and a verification code delivered to the user's phone during user sign-in. MFA mitigates security threats of users creating weak passwords and user passwords being stolen or accidentally leaked. For more information on MFA best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Remediation": { + "Recommendation": { + "Text": "For information on MFA best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 
4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586", + "CIS Snowflake Foundations Benchmark V1.0.0 2.4" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_admin_password_users_yearly_password_rotation_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.7] Snowflake users with any admin role assigned should have their password rotated yearly""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + + # Use the "is_admin" field to determine if the user is an admin and the "password_last_set_time" field (ISO-8061) to determine if the password has been rotated in the last year + rotatedInLastYear = True + isAdmin = user["is_admin"] + passwordLastSetTime = datetime.fromisoformat(user["password_last_set_time"]) + currentTime = datetime.now(UTC) + daysAgo = currentTime - timedelta(days=365) + if passwordLastSetTime < daysAgo: + rotatedInLastYear = False + + # this is a passing check + if rotatedInLastYear is True and isAdmin is True and user["has_password"] is True and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-admins-yearly-passowrd-rotation-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.7] Snowflake users with any admin role assigned should have their password rotated yearly", + "Description": f"Snowflake user {username} has an admin role assigned and has rotated their password in the last year. This check does not account for custom assigned roles, only the built-in Snowflake admin roles: ACCOUNTADMIN, ORGADMIN, SECURITYADMIN, or SYSADMIN. 
This check also only evaluates users that have a password set, as 'service accounts' do not have passwords and therefore have nothing to rotate.", + "Remediation": { + "Recommendation": { + "Text": "For information on security best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if rotatedInLastYear is False and isAdmin is True and user["has_password"] is True and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-admins-yearly-password-rotation-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Users.7] Snowflake users with any admin role assigned should have their password rotated yearly", + "Description": f"Snowflake user {username} has an admin role assigned and has not rotated their password in the last year. This check does not account for custom assigned roles, only the built-in Snowflake admin roles: ACCOUNTADMIN, ORGADMIN, SECURITYADMIN, or SYSADMIN. This check also only evaluates users that have a password set, as 'service accounts' do not have passwords and therefore have nothing to rotate. Password rotation is a security best practice that helps prevent unauthorized access to systems and data.
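Note that every check in this Auditor emits the same pass/fail pairing: an identical Id and GeneratorId with only Severity, Compliance.Status, Workflow, and RecordState flipped, so Security Hub updates the same finding in place rather than duplicating it. A pared-down sketch of that shape (helper name and field values are illustrative):

def build_finding(passing: bool, finding_id: str, title: str, description: str) -> dict:
    # Minimal subset of the ASFF fields that differ between the two branches
    return {
        "SchemaVersion": "2018-10-08",
        "Id": finding_id,  # stable across pass/fail so the finding is updated, not duplicated
        "Title": title,
        "Description": description,
        "Severity": {"Label": "INFORMATIONAL" if passing else "LOW"},
        "Compliance": {"Status": "PASSED" if passing else "FAILED"},
        "Workflow": {"Status": "RESOLVED" if passing else "NEW"},
        "RecordState": "ARCHIVED" if passing else "ACTIVE",
    }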
For more information on security best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Remediation": { + "Recommendation": { + "Text": "For information on security best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_bypass_mfa_review_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.8] Snowflake users allowed to bypass MFA should be reviewed""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + # Get all of the users + for user in get_snowflake_users(cache, snowflakeCursor): + # B64 encode all of the details for the Asset + assetJson = json.dumps(user,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + username = user["name"] + + # Use the "bypass_mfa_until" field (ISO-8601) to determine if the user is allowed to bypass MFA by checking if the date is in the future - only perform this check for password users with MFA enabled + mfaBypass = False + if user["ext_authn_duo"] is True and user["has_password"] is True: + if user["bypass_mfa_until"] is not None: + bypassMfaUntil = datetime.fromisoformat(user["bypass_mfa_until"]) + currentTime = datetime.now(UTC) + if bypassMfaUntil > currentTime: + mfaBypass = True + + # this is a passing check + if mfaBypass is False and user["deleted_on"] is None: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-user-mfa-bypass-check", + "ProductArn":
f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, + "Title": "[Snowflake.Users.8] Snowflake users allowed to bypass MFA should be reviewed", + "Description": f"Snowflake user {username} is not allowed to bypass MFA or they do not have MFA or a Password enabled.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing MFA and bypass for users in Snowflake refer to the Managing MFA for an account and users section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-mfa" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + if mfaBypass is True and user["deleted_on"] is None and username not in serviceAccountExemptions: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/{username}/snowflake-user-mfa-bypass-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": f"{snowflakeAccountId}/{username}", + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Users.8] Snowflake users allowed to bypass MFA should be reviewed", + "Description": f"Snowflake user {username} has MFA assigned and is allowed to bypass MFA. When MFA is enabled, users are required to provide two or more verification factors to access their account. Allowing users to bypass MFA can increase the risk of unauthorized access to your Snowflake account. 
While there are some administrative reasons to bypass MFA, these users should be reviewed to ensure that they are not a security risk.", + "Remediation": { + "Recommendation": { + "Text": "For information on managing MFA and bypass for users in Snowflake refer to the Managing MFA for an account and users section of the Snowflake Documentation Portal.", + "Url": "https://docs.snowflake.com/en/user-guide/security-mfa" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Identity & Access Management", + "AssetService": "Snowflake Users", + "AssetComponent": "User" + }, + "Resources": [ + { + "Type": "SnowflakeUser", + "Id": username, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-1", + "NIST SP 800-53 Rev. 4 AC-1", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 IA-1", + "NIST SP 800-53 Rev. 4 IA-2", + "NIST SP 800-53 Rev. 4 IA-3", + "NIST SP 800-53 Rev. 4 IA-4", + "NIST SP 800-53 Rev. 4 IA-5", + "NIST SP 800-53 Rev. 4 IA-6", + "NIST SP 800-53 Rev. 4 IA-7", + "NIST SP 800-53 Rev. 4 IA-8", + "NIST SP 800-53 Rev. 4 IA-9", + "NIST SP 800-53 Rev. 4 IA-10", + "NIST SP 800-53 Rev. 4 IA-11", + "AICPA TSC CC6.1", + "AICPA TSC CC6.2", + "ISO 27001:2013 A.9.2.1", + "ISO 27001:2013 A.9.2.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.2.4", + "ISO 27001:2013 A.9.2.6", + "ISO 27001:2013 A.9.3.1", + "ISO 27001:2013 A.9.4.2", + "ISO 27001:2013 A.9.4.3", + "MITRE ATT&CK T1589", + "MITRE ATT&CK T1586" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +@registry.register_check("snowflake.users") +def snowflake_limit_admin_users_check( + cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str, snowflakeAccountId: str, snowflakeRegion: str, snowflakeCursor: cursor.SnowflakeCursor, serviceAccountExemptions: list[str] +) -> dict: + """[Snowflake.Users.9] Snowflake Accounts should have at least two admin users but less than ten""" + # ISO Time + iso8601Time = datetime.now(UTC).replace(tzinfo=timezone.utc).isoformat() + + # using the "is_admin" field to determine if the user is an admin, create a list comprehension to count the number of admins, if the count is less than 2 or greater than 10 this check will fail by changing the properAmountOfAdmins variable to False + properAmountOfAdmins = True + adminUsers = [user for user in get_snowflake_users(cache, snowflakeCursor) if user["is_admin"] is True and user["deleted_on"] is None] + adminCount = len(adminUsers) + if adminCount < 2 or adminCount > 10: + properAmountOfAdmins = False + + # B64 encode all of the details for the Asset + assetJson = json.dumps(adminUsers,default=str).encode("utf-8") + assetB64 = base64.b64encode(assetJson) + + # this is a passing check + if properAmountOfAdmins is True: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-limited-admins-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "INFORMATIONAL"}, + "Confidence": 99, +
"Title": "[Snowflake.Users.9] Snowflake Accounts should have at least two admin users but less than ten", + "Description": f"Snowflake account {snowflakeAccountId} has more than two users with admin roles and less than ten. ORGADMIN, SECURITYADMIN, ACCOUNTADMIN, and SYSADMIN are the built-in Snowflake admin roles. This check does not account for custom assigned roles, only the built-in Snowflake admin roles. Following the principle of least privilege that prescribes limiting user's privileges to those that are strictly required to do their jobs, the admin roles should be assigned to a limited number of designated users (e.g., less than 10, but at least 2 to ensure that access can be recovered if one ACCOUNTAMIN user is having login difficulties).", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "PASSED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-4", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-5", + "NIST SP 800-53 Rev. 4 AC-6", + "NIST SP 800-53 Rev. 4 AC-16", + "AICPA TSC CC6.3", + "ISO 27001:2013 A.6.1.2", + "ISO 27001:2013 A.9.1.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.4.1", + "ISO 27001:2013 A.9.4.4", + "MITRE ATT&CK T1210", + "MITRE ATT&CK T1570", + "MITRE ATT&CK T1021.007", + "MITRE ATT&CK T1020", + "MITRE ATT&CK T1048", + "MITRE ATT&CK T1567", + "CIS Snowflake Foundations Benchmark V1.0.0 1.10" + ] + }, + "Workflow": {"Status": "RESOLVED"}, + "RecordState": "ARCHIVED" + } + yield finding + # this is a failing check + else: + finding = { + "SchemaVersion": "2018-10-08", + "Id": f"{snowflakeAccountId}/snowflake-account-limted-admins-check", + "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", + "GeneratorId": snowflakeAccountId, + "AwsAccountId": awsAccountId, + "Types": ["Software and Configuration Checks/AWS Security Best Practices"], + "FirstObservedAt": iso8601Time, + "CreatedAt": iso8601Time, + "UpdatedAt": iso8601Time, + "Severity": {"Label": "LOW"}, + "Confidence": 99, + "Title": "[Snowflake.Users.9] Snowflake Accounts should have at least two admin users but less than ten", + "Description": f"Snowflake account {snowflakeAccountId} either has less than two admins or more than ten. ORGADMIN, SECURITYADMIN, ACCOUNTADMIN, and SYSADMIN are the built-in Snowflake admin roles. This check does not account for custom assigned roles, only the built-in Snowflake admin roles. 
Following the principle of least privilege that prescribes limiting users' privileges to those that are strictly required to do their jobs, the admin roles should be assigned to a limited number of designated users (e.g., less than 10, but at least 2 to ensure that access can be recovered if one ACCOUNTADMIN user is having login difficulties).", + "Remediation": { + "Recommendation": { + "Text": "For information on best practices for users in Snowflake refer to the community post Snowflake Security Overview and Best Practices in the Snowflake Community Portal.", + "Url": "https://community.snowflake.com/s/article/Snowflake-Security-Overview-and-Best-Practices?mkt_tok=MjUyLVJGTy0yMjcAAAGTVPcnsobib0St0CwRwVZ4sfwHPicq12DnL_MX_bz-yG4OgkADmIh6ll3PcRhIqFeezBwdFSNL-ipp9vJHUV6hRiKUK2b-0f5_HGpkwz7pTG2_w6cO9Q" + } + }, + "ProductFields": { + "ProductName": "ElectricEye", + "Provider": "Snowflake", + "ProviderType": "SaaS", + "ProviderAccountId": snowflakeAccountId, + "AssetRegion": snowflakeRegion, + "AssetDetails": assetB64, + "AssetClass": "Management & Governance", + "AssetService": "Snowflake Account", + "AssetComponent": "Account" + }, + "Resources": [ + { + "Type": "SnowflakeAccount", + "Id": snowflakeAccountId, + "Partition": awsPartition, + "Region": awsRegion + } + ], + "Compliance": { + "Status": "FAILED", + "RelatedRequirements": [ + "NIST CSF V1.1 PR.AC-4", + "NIST SP 800-53 Rev. 4 AC-2", + "NIST SP 800-53 Rev. 4 AC-3", + "NIST SP 800-53 Rev. 4 AC-5", + "NIST SP 800-53 Rev. 4 AC-6", + "NIST SP 800-53 Rev. 4 AC-16", + "AICPA TSC CC6.3", + "ISO 27001:2013 A.6.1.2", + "ISO 27001:2013 A.9.1.2", + "ISO 27001:2013 A.9.2.3", + "ISO 27001:2013 A.9.4.1", + "ISO 27001:2013 A.9.4.4", + "MITRE ATT&CK T1210", + "MITRE ATT&CK T1570", + "MITRE ATT&CK T1021.007", + "MITRE ATT&CK T1020", + "MITRE ATT&CK T1048", + "MITRE ATT&CK T1567", + "CIS Snowflake Foundations Benchmark V1.0.0 1.10" + ] + }, + "Workflow": {"Status": "NEW"}, + "RecordState": "ACTIVE" + } + yield finding + +# EOF \ No newline at end of file diff --git a/eeauditor/cloud_utils.py b/eeauditor/cloud_utils.py index 3631b790..db45bdef 100644 --- a/eeauditor/cloud_utils.py +++ b/eeauditor/cloud_utils.py @@ -28,15 +28,11 @@ from botocore.exceptions import ClientError from azure.identity import ClientSecretCredential from azure.mgmt.resource.subscriptions import SubscriptionClient +import snowflake.connector as snowconn +logging.basicConfig(level=logging.INFO) logger = logging.getLogger("CloudUtils") -# Boto3 Clients -sts = boto3.client("sts") -ssm = boto3.client("ssm") -asm = boto3.client("secretsmanager") -org = boto3.client("organizations") - # These Constants define legitimate values for certain parameters within the external_providers.toml file AWS_MULTI_ACCOUNT_TARGET_TYPE_CHOICES = ["Accounts", "OU", "Organization"] CREDENTIALS_LOCATION_CHOICES = ["AWS_SSM", "AWS_SECRETS_MANAGER", "CONFIG_FILE"] @@ -47,454 +43,511 @@ class CloudConfig(object): for use in EEAuditor when running ElectricEye Auditors and Check """ - def __init__(self, assessmentTarget, tomlPath): - if tomlPath is None: - here = path.abspath(path.dirname(__file__)) - tomlFile = f"{here}/external_providers.toml" - else: - tomlFile = tomlPath + def __init__(self, assessmentTarget: str, tomlPath: str | None, useToml: str, args: str | None): + if useToml == "True": + if tomlPath is None: + here = path.abspath(path.dirname(__file__)) + tomlFile = f"{here}/external_providers.toml" + else: + tomlFile = tomlPath - with open(tomlFile, "rb") as f: - data = tomload(f) + with
open(tomlFile, "rb") as f: + data = tomload(f) - # From TOML [global] - if data["global"]["aws_multi_account_target_type"] not in AWS_MULTI_ACCOUNT_TARGET_TYPE_CHOICES: - logger.error("Invalid option for [global.aws_multi_account_target_type].") - sys.exit(2) - self.awsMultiAccountTargetType = data["global"]["aws_multi_account_target_type"] + # From TOML [global] + if data["global"]["aws_multi_account_target_type"] not in AWS_MULTI_ACCOUNT_TARGET_TYPE_CHOICES: + logger.error("Invalid option for [global.aws_multi_account_target_type].") + sys.exit(2) + self.awsMultiAccountTargetType = data["global"]["aws_multi_account_target_type"] - if data["global"]["credentials_location"] not in CREDENTIALS_LOCATION_CHOICES: - logger.error( - "Invalid option for [global.credentials_location]. Must be one of %s.", - CREDENTIALS_LOCATION_CHOICES - ) - sys.exit(2) - self.credentialsLocation = data["global"]["credentials_location"] + if data["global"]["credentials_location"] not in CREDENTIALS_LOCATION_CHOICES: + logger.error( + "Invalid option for [global.credentials_location]. Must be one of %s.", + CREDENTIALS_LOCATION_CHOICES + ) + sys.exit(2) + + self.credentialsLocation = data["global"]["credentials_location"] + # from args + if useToml == "False": + # first turn args from a string into a dictionary + args = json.loads(args) ################################## # PUBLIC CLOUD SERVICE PROVIDERS # ################################## - - # AWS - if assessmentTarget == "AWS": - # Process ["aws_account_targets"] - awsAccountTargets = data["regions_and_accounts"]["aws"]["aws_account_targets"] - if self.awsMultiAccountTargetType == "Accounts": - if not awsAccountTargets: - self.awsAccountTargets = [sts.get_caller_identity()["Account"]] - else: - self.awsAccountTargets = awsAccountTargets - elif self.awsMultiAccountTargetType == "OU": - if not awsAccountTargets: - logger.error("OU was specified but targets were not specified.") - sys.exit(2) - # Regex to check for Valid OUs - ouIdRegex = compile(r"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$") - for ou in awsAccountTargets: - if not ouIdRegex.match(ou): - logger.error(f"Invalid Organizational Unit ID {ou}.") + if useToml == "True": + # AWS + if assessmentTarget == "AWS": + sts = boto3.client("sts") + # Process ["aws_account_targets"] + awsAccountTargets = data["regions_and_accounts"]["aws"]["aws_account_targets"] + if self.awsMultiAccountTargetType == "Accounts": + if not awsAccountTargets: + self.awsAccountTargets = [sts.get_caller_identity()["Account"]] + else: + self.awsAccountTargets = awsAccountTargets + elif self.awsMultiAccountTargetType == "OU": + if not awsAccountTargets: + logger.error("OU was specified but targets were not specified.") sys.exit(2) - self.awsAccountTargets = self.get_aws_accounts_from_organizational_units(awsAccountTargets) - elif self.awsMultiAccountTargetType == "Organization": - self.awsAccountTargets = self.get_aws_accounts_from_organization() - - # Process ["aws_regions_selection"] - awsRegions = self.get_aws_regions() - if not data["regions_and_accounts"]["aws"]["aws_regions_selection"]: - self.awsRegionsSelection = [boto3.Session().region_name] - else: - tomlRegions = data["regions_and_accounts"]["aws"]["aws_regions_selection"] - if "All" in tomlRegions: - self.awsRegionsSelection = awsRegions + # Regex to check for Valid OUs + ouIdRegex = compile(r"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$") + for ou in awsAccountTargets: + if not ouIdRegex.match(ou): + logger.error(f"Invalid Organizational Unit ID {ou}.") + sys.exit(2) + self.awsAccountTargets 
= self.get_aws_accounts_from_organizational_units(awsAccountTargets) + elif self.awsMultiAccountTargetType == "Organization": + self.awsAccountTargets = self.get_aws_accounts_from_organization() + + # Process ["aws_regions_selection"] + awsRegions = self.get_aws_regions() + if not data["regions_and_accounts"]["aws"]["aws_regions_selection"]: + self.awsRegionsSelection = [boto3.Session().region_name] else: - # Validation check - self.awsRegionsSelection = [a for a in tomlRegions if a in awsRegions] + tomlRegions = data["regions_and_accounts"]["aws"]["aws_regions_selection"] + if "All" in tomlRegions: + self.awsRegionsSelection = awsRegions + else: + # Validation check + self.awsRegionsSelection = [a for a in tomlRegions if a in awsRegions] + + # Process ["aws_electric_eye_iam_role_name"] + electricEyeRoleName = data["regions_and_accounts"]["aws"]["aws_electric_eye_iam_role_name"] + if electricEyeRoleName is None or electricEyeRoleName == "": + logger.warning( + "A value for ['aws_electric_eye_iam_role_name'] was not provided. Will attempt to use current session credentials, this will likely fail if you're attempting to assess another AWS account." + ) + electricEyeRoleName = None + + self.electricEyeRoleName = electricEyeRoleName - # Process ["aws_electric_eye_iam_role_name"] - electricEyeRoleName = data["regions_and_accounts"]["aws"]["aws_electric_eye_iam_role_name"] - if electricEyeRoleName is None or electricEyeRoleName == "": - logger.warning( - "A value for ['aws_electric_eye_iam_role_name'] was not provided. Will attempt to use current session credentials, this will likely fail if you're attempting to assess another AWS account." - ) - electricEyeRoleName = None - - self.electricEyeRoleName = electricEyeRoleName - - # GCP - elif assessmentTarget == "GCP": - # Process ["gcp_project_ids"] - gcpProjects = data["regions_and_accounts"]["gcp"]["gcp_project_ids"] - if not gcpProjects: - logger.error("No GCP Projects were provided in [regions_and_accounts.gcp.gcp_project_ids].") - sys.exit(2) - else: - self.gcpProjectIds = gcpProjects - - # Process ["gcp_service_account_json_payload_value"] - gcpCred = data["credentials"]["gcp"]["gcp_service_account_json_payload_value"] - if self.credentialsLocation == "CONFIG_FILE": - self.gcpServiceAccountJsonPayloadValue = gcpCred - elif self.credentialsLocation == "AWS_SSM": - self.gcpServiceAccountJsonPayloadValue = self.get_credential_from_aws_ssm( - gcpCred, - "gcp_service_account_json_payload_value" - ) - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - self.gcpServiceAccountJsonPayloadValue = self.get_credential_from_aws_secrets_manager( - gcpCred, - "gcp_service_account_json_payload_value" - ) - self.setup_gcp_credentials(self.gcpServiceAccountJsonPayloadValue) - - # Oracle Cloud Infrastructure (OCI) - elif assessmentTarget == "OCI": - ociValues = data["regions_and_accounts"]["oci"] - - # Retrieve the OCIDs for Tenancy & User and the Region ID along with a list of Compartment OCIDs - ociTenancyId = ociValues["oci_tenancy_ocid"] - ociUserId = ociValues["oci_user_ocid"] - ociRegionName = ociValues["oci_region_name"] - ociCompartments = ociValues["oci_compartment_ocids"] - # Process the [credentials.oci] - ociUserApiKeyFingerprint = data["credentials"]["oci"]["oci_user_api_key_fingerprint_value"] - ociUserApiKeyPemValue = data["credentials"]["oci"]["oci_user_api_key_private_key_pem_contents_value"] - - if any( - # Check to make sure none of the variables pulled from TOML are emtpy - not var for var in [ - ociTenancyId, ociUserId, 
ociRegionName, ociCompartments, ociUserApiKeyFingerprint, ociUserApiKeyPemValue - ] - ): - logger.error(f"One of your Oracle Cloud TOML entries in [regions_and_accounts.oci] or [credentials.oci] is empty!") - sys.exit(2) - - # Assign ["regions_and_accounts"]["oci"] values to `self` - self.ociTenancyId = ociTenancyId - self.ociUserId = ociUserId - self.ociRegionName = ociRegionName - self.ociCompartments = ociCompartments - - # Process ["oci_user_api_key_fingerprint_value"] - ociUserApiKeyFingerprint = data["credentials"]["oci"]["oci_user_api_key_fingerprint_value"] - if self.credentialsLocation == "CONFIG_FILE": - ociUserApiKeyFingerprint = ociUserApiKeyFingerprint - elif self.credentialsLocation == "AWS_SSM": - ociUserApiKeyFingerprint = self.get_credential_from_aws_ssm( - ociUserApiKeyFingerprint, - "oci_user_api_key_fingerprint_value" - ) - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - ociUserApiKeyFingerprint = self.get_credential_from_aws_secrets_manager( - ociUserApiKeyFingerprint, - "oci_user_api_key_fingerprint_value" - ) - - self.ociUserApiKeyFingerprint = ociUserApiKeyFingerprint - - # Process ["oci_user_api_key_private_key_pem_contents_value"] - ociUserApiKeyPemLocation = data["credentials"]["oci"]["oci_user_api_key_private_key_pem_contents_value"] - if self.credentialsLocation == "CONFIG_FILE": - ociUserApiKeyPemLocation = ociUserApiKeyPemLocation - elif self.credentialsLocation == "AWS_SSM": - ociUserApiKeyPemLocation = self.get_credential_from_aws_ssm( - ociUserApiKeyPemLocation, - "oci_user_api_key_private_key_pem_contents_value" - ) - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - ociUserApiKeyPemLocation = self.get_credential_from_aws_secrets_manager( - ociUserApiKeyPemLocation, - "oci_user_api_key_private_key_pem_contents_value" - ) - - # Create the PEM file and save the location of it to environ - self.setup_oci_credentials(ociUserApiKeyPemLocation) - - # Azure - elif assessmentTarget == "Azure": - # Process data["credentials"]["azure"] - values need to be assigned to self - azureValues = data["credentials"]["azure"] - - azureClientId = azureValues["azure_ent_app_client_id_value"] - azureSecretId = azureValues["azure_ent_app_client_secret_id_value"] - azureTenantId = azureValues["azure_ent_app_tenant_id_value"] - azureSubscriptions = data["regions_and_accounts"]["azure"]["azure_subscription_ids"] - - del azureValues - - if any( - # Check to make sure none of the variables pulled from TOML are emtpy - not var for var in [ - azureClientId, azureSecretId, azureTenantId - ] - ): - logger.error("One of your azure TOML entries in [credentials.azure] is empty!") - sys.exit(2) - - # Retrieve the values for the azure Enterprise Application Client ID, Secret Value & Tenant ID - # SSM - if self.credentialsLocation == "AWS_SSM": - # Client ID - azureClientId = self.get_credential_from_aws_ssm( - azureClientId, - "azure_ent_app_client_id_value" - ) - # Secret Value - azureSecretId = self.get_credential_from_aws_ssm( - azureSecretId, - "azure_ent_app_client_secret_id_value" - ) - # Tenant ID - azureTenantId = self.get_credential_from_aws_ssm( - azureTenantId, - "azure_ent_app_tenant_id_value" - ) - # AWS Secrets Manager - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - # Client ID - azureClientId = self.get_credential_from_aws_secrets_manager( - azureClientId, - "azure_ent_app_client_id_value" - ) - # Secret Value - azureSecretId = self.get_credential_from_aws_secrets_manager( - azureSecretId, - "azure_ent_app_client_secret_id_value" - ) - # Tenant ID 
- azureTenantId = self.get_credential_from_aws_secrets_manager( - azureTenantId, - "azure_ent_app_tenant_id_value" - ) - - # Create Azure Identity credentials from Client ID/Secret Value/Tenant ID - azureCredentials = self.create_azure_identity_credentials_from_client_secret( - clientId=azureClientId, - clientSecret=azureSecretId, - tenantId=azureTenantId - ) - - # If subscriptions aren't supplied, attempt to find which ones you have access to - if not azureSubscriptions: - logger.warning( - "No values provided for [regions_and_accounts.azure.azure_subscription_ids] - attempting to retrieve subscription IDs your Service Principal has access to..." - ) - azureSubscriptions = self.retrieve_azure_subscriptions_for_service_principal( - azureCredentials=azureCredentials - ) - # pass list of subscriptions and the creds off - self.azureSubscriptions = azureSubscriptions - self.azureCredentials = azureCredentials - - # Alibaba Cloud - elif assessmentTarget == "Alibaba": - logger.info("Coming soon!") - - ################################### - # SOFTWARE-AS-A-SERVICE PROVIDERS # - ################################### - - # ServiceNow - elif assessmentTarget == "Servicenow": - # Process data["credentials"]["servicenow"] - nothing needs to be assigned to `self` - serviceNowValues = data["credentials"]["servicenow"] - - snowInstanceName = serviceNowValues["servicenow_instance_name"] - snowInstanceRegion = serviceNowValues["servicenow_instance_region"] - snowUserName = serviceNowValues["servicenow_sspm_username"] - snowUserLoginBreachRate = serviceNowValues["servicenow_failed_login_breaching_rate"] - - if any( - # Check to make sure none of the variables pulled from TOML are emtpy - not var for var in [ - snowInstanceName, snowInstanceRegion, snowUserName, snowUserLoginBreachRate - ] - ): - logger.error(f"One of your ServiceNow TOML entries in [credentials.servicenow] is empty!") - sys.exit(2) + # GCP + if assessmentTarget == "GCP": + # Process ["gcp_project_ids"] + gcpProjects = list(data["regions_and_accounts"]["gcp"]["gcp_project_ids"]) + if not gcpProjects: + logger.error("No GCP Projects were provided in [regions_and_accounts.gcp.gcp_project_ids].") + sys.exit(2) + else: + self.gcpProjectIds = gcpProjects + + # Process ["gcp_service_account_json_payload_value"] + gcpCred = data["credentials"]["gcp"]["gcp_service_account_json_payload_value"] + if self.credentialsLocation == "CONFIG_FILE": + self.gcpServiceAccountJsonPayloadValue = gcpCred + elif self.credentialsLocation == "AWS_SSM": + self.gcpServiceAccountJsonPayloadValue = self.get_credential_from_aws_ssm( + gcpCred, + "gcp_service_account_json_payload_value" + ) + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + self.gcpServiceAccountJsonPayloadValue = self.get_credential_from_aws_secrets_manager( + gcpCred, + "gcp_service_account_json_payload_value" + ) + self.setup_gcp_credentials(self.gcpServiceAccountJsonPayloadValue) - # Retrieve ServiceNow ElectricEye user password - serviceNowPwVal = serviceNowValues["servicenow_sspm_password_value"] - if self.credentialsLocation == "CONFIG_FILE": - environ["SNOW_SSPM_PASSWORD"] = serviceNowPwVal - elif self.credentialsLocation == "AWS_SSM": - environ["SNOW_SSPM_PASSWORD"] = self.get_credential_from_aws_ssm( - serviceNowPwVal, - "servicenow_sspm_password_value" - ) - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - environ["SNOW_SSPM_PASSWORD"] = self.get_credential_from_aws_secrets_manager( - serviceNowPwVal, - "servicenow_sspm_password_value" - ) - # All other ServiceNow Values are 
written as environment variables and either provided - # to PySnow Clients or to ProductFields{} within the ASFF per Finding - environ["SNOW_INSTANCE_NAME"] = snowInstanceName - environ["SNOW_INSTANCE_REGION"] = snowInstanceRegion - environ["SNOW_SSPM_USERNAME"] = snowUserName - environ["SNOW_FAILED_LOGIN_BREACHING_RATE"] = snowUserLoginBreachRate - - # M365 - elif assessmentTarget == "M365": - # Process data["credentials"]["m365"] - values need to be assigned to self - m365Values = data["credentials"]["m365"] - - m365ClientId = m365Values["m365_ent_app_client_id_value"] - m365SecretId = m365Values["m365_ent_app_client_secret_id_value"] - m365TenantId = m365Values["m365_ent_app_tenant_id_value"] - m365TenantLocation = m365Values["m365_tenant_location"] - - if any( - # Check to make sure none of the variables pulled from TOML are emtpy - not var for var in [ - m365ClientId, m365SecretId, m365TenantId, m365TenantLocation - ] - ): - logger.error(f"One of your M365 TOML entries in [credentials.m365] is empty!") - sys.exit(2) - - # This value (tenant location) will always be in plaintext - self.m365TenantLocation = m365TenantLocation - - # Retrieve the values for the M365 Enterprise Application Client ID, Secret Value & Tenant ID - if self.credentialsLocation == "CONFIG_FILE": - self.m365ClientId = m365ClientId - self.m365SecretId = m365SecretId - self.m365TenantId = m365TenantId - # SSM - elif self.credentialsLocation == "AWS_SSM": - # Client ID - self.m365ClientId = self.get_credential_from_aws_ssm( - m365ClientId, - "m365_ent_app_client_id_value" - ) - # Secret Value - self.m365SecretId = self.get_credential_from_aws_ssm( - m365SecretId, - "m365_ent_app_client_secret_id_value" - ) - # Tenant ID - self.m365TenantId = self.get_credential_from_aws_ssm( - m365TenantId, - "m365_ent_app_tenant_id_value" - ) - # AWS Secrets Manager - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - # Client ID - self.m365ClientId = self.get_credential_from_aws_secrets_manager( - m365ClientId, - "m365_ent_app_client_id_value" - ) - # Secret Value - self.m365SecretId = self.get_credential_from_aws_secrets_manager( - m365SecretId, - "m365_ent_app_client_secret_id_value" - ) - # Tenant ID - self.m365TenantId = self.get_credential_from_aws_secrets_manager( - m365TenantId, - "m365_ent_app_tenant_id_value" - ) - - # Salesforce - elif assessmentTarget == "Salesforce": - # Process data["credentials"]["m365"] - values need to be assigned to self - salesforceValues = data["credentials"]["salesforce"] - - salesforceAppClientId = salesforceValues["salesforce_connected_app_client_id_value"] - salesforceAppClientSecret = salesforceValues["salesforce_connected_app_client_secret_value"] - salesforceApiUsername = salesforceValues["salesforce_api_enabled_username_value"] - salesforceApiPassword = salesforceValues["salesforce_api_enabled_password_value"] - salesforceUserSecurityToken = salesforceValues["salesforce_api_enabled_security_token_value"] - salesforceInstanceLocation = salesforceValues["salesforce_instance_location"] - salesforceFailedLoginBreachingRate = salesforceValues["salesforce_failed_login_breaching_rate"] - salesforceApiVersion = salesforceValues["salesforce_api_version"] - - if any( - # Check to make sure none of the variables pulled from TOML are emtpy - not var for var in [ - salesforceAppClientId, salesforceAppClientSecret, salesforceApiUsername, salesforceApiPassword, salesforceUserSecurityToken, salesforceInstanceLocation, salesforceFailedLoginBreachingRate, salesforceApiVersion - ] - ): - 
logger.error(f"One of your Salesforce TOML entries in [credentials.salesforce] is empty!") - sys.exit(2) + # Oracle Cloud Infrastructure (OCI) + if assessmentTarget == "OCI": + ociValues = data["regions_and_accounts"]["oci"] + + # Retrieve the OCIDs for Tenancy & User and the Region ID along with a list of Compartment OCIDs + ociTenancyId = str(ociValues["oci_tenancy_ocid"]) + ociUserId = str(ociValues["oci_user_ocid"]) + ociRegionName = str(ociValues["oci_region_name"]) + ociCompartments = list(ociValues["oci_compartment_ocids"]) + # Process the [credentials.oci] + ociUserApiKeyFingerprint = data["credentials"]["oci"]["oci_user_api_key_fingerprint_value"] + ociUserApiKeyPemValue = data["credentials"]["oci"]["oci_user_api_key_private_key_pem_contents_value"] + + if any( + # Check to make sure none of the variables pulled from TOML are emtpy + not var for var in [ + ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint, ociUserApiKeyPemValue + ] + ): + logger.error(f"One of your Oracle Cloud TOML entries in [regions_and_accounts.oci] or [credentials.oci] is empty!") + sys.exit(2) - # The failed login breaching rate and API Version will be in plaintext/env vars - environ["SALESFORCE_FAILED_LOGIN_BREACHING_RATE"] = salesforceFailedLoginBreachingRate - environ["SFDC_API_VERSION"] = salesforceApiVersion + # Assign ["regions_and_accounts"]["oci"] values to `self` + self.ociTenancyId = ociTenancyId + self.ociUserId = ociUserId + self.ociRegionName = ociRegionName + self.ociCompartments = ociCompartments + + # Process ["oci_user_api_key_fingerprint_value"] + ociUserApiKeyFingerprint = data["credentials"]["oci"]["oci_user_api_key_fingerprint_value"] + if self.credentialsLocation == "CONFIG_FILE": + ociUserApiKeyFingerprint = ociUserApiKeyFingerprint + elif self.credentialsLocation == "AWS_SSM": + ociUserApiKeyFingerprint = self.get_credential_from_aws_ssm( + ociUserApiKeyFingerprint, + "oci_user_api_key_fingerprint_value" + ) + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + ociUserApiKeyFingerprint = self.get_credential_from_aws_secrets_manager( + ociUserApiKeyFingerprint, + "oci_user_api_key_fingerprint_value" + ) + + self.ociUserApiKeyFingerprint = ociUserApiKeyFingerprint + + # Process ["oci_user_api_key_private_key_pem_contents_value"] + ociUserApiKeyPemLocation = data["credentials"]["oci"]["oci_user_api_key_private_key_pem_contents_value"] + if self.credentialsLocation == "CONFIG_FILE": + ociUserApiKeyPemLocation = ociUserApiKeyPemLocation + elif self.credentialsLocation == "AWS_SSM": + ociUserApiKeyPemLocation = self.get_credential_from_aws_ssm( + ociUserApiKeyPemLocation, + "oci_user_api_key_private_key_pem_contents_value" + ) + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + ociUserApiKeyPemLocation = self.get_credential_from_aws_secrets_manager( + ociUserApiKeyPemLocation, + "oci_user_api_key_private_key_pem_contents_value" + ) + + # Create the PEM file and save the location of it to environ + self.setup_oci_credentials(ociUserApiKeyPemLocation) + + # Azure + if assessmentTarget == "Azure": + # Process data["credentials"]["azure"] - values need to be assigned to self + azureValues = data["credentials"]["azure"] + + azureClientId = azureValues["azure_ent_app_client_id_value"] + azureSecretId = azureValues["azure_ent_app_client_secret_id_value"] + azureTenantId = azureValues["azure_ent_app_tenant_id_value"] + azureSubscriptions = data["regions_and_accounts"]["azure"]["azure_subscription_ids"] + + del azureValues + + if any( + # Check to make 
sure none of the variables pulled from TOML are emtpy + not var for var in [ + azureClientId, azureSecretId, azureTenantId + ] + ): + logger.error("One of your azure TOML entries in [credentials.azure] is empty!") + sys.exit(2) - # Location is parsed from the config directly - self.salesforceInstanceLocation = salesforceInstanceLocation + # Retrieve the values for the azure Enterprise Application Client ID, Secret Value & Tenant ID + # SSM + if self.credentialsLocation == "AWS_SSM": + # Client ID + azureClientId = self.get_credential_from_aws_ssm( + azureClientId, + "azure_ent_app_client_id_value" + ) + # Secret Value + azureSecretId = self.get_credential_from_aws_ssm( + azureSecretId, + "azure_ent_app_client_secret_id_value" + ) + # Tenant ID + azureTenantId = self.get_credential_from_aws_ssm( + azureTenantId, + "azure_ent_app_tenant_id_value" + ) + # AWS Secrets Manager + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + # Client ID + azureClientId = self.get_credential_from_aws_secrets_manager( + azureClientId, + "azure_ent_app_client_id_value" + ) + # Secret Value + azureSecretId = self.get_credential_from_aws_secrets_manager( + azureSecretId, + "azure_ent_app_client_secret_id_value" + ) + # Tenant ID + azureTenantId = self.get_credential_from_aws_secrets_manager( + azureTenantId, + "azure_ent_app_tenant_id_value" + ) + + # Create Azure Identity credentials from Client ID/Secret Value/Tenant ID + azureCredentials = self.create_azure_identity_credentials_from_client_secret( + clientId=azureClientId, + clientSecret=azureSecretId, + tenantId=azureTenantId + ) + + # If subscriptions aren't supplied, attempt to find which ones you have access to + if not azureSubscriptions: + logger.warning( + "No values provided for [regions_and_accounts.azure.azure_subscription_ids] - attempting to retrieve subscription IDs your Service Principal has access to..." 
+ ) + azureSubscriptions = self.retrieve_azure_subscriptions_for_service_principal( + azureCredentials=azureCredentials + ) + # pass list of subscriptions and the creds off + self.azureSubscriptions = azureSubscriptions + self.azureCredentials = azureCredentials + + # Alibaba Cloud + if assessmentTarget == "Alibaba": + logger.info("Coming soon!") + + ################################### + # SOFTWARE-AS-A-SERVICE PROVIDERS # + ################################### + + # ServiceNow + if assessmentTarget == "Servicenow": + # Process data["credentials"]["servicenow"] - nothing needs to be assigned to `self` + serviceNowValues = data["credentials"]["servicenow"] + + snowInstanceName = serviceNowValues["servicenow_instance_name"] + snowInstanceRegion = serviceNowValues["servicenow_instance_region"] + snowUserName = serviceNowValues["servicenow_sspm_username"] + snowUserLoginBreachRate = serviceNowValues["servicenow_failed_login_breaching_rate"] + + if any( + # Check to make sure none of the variables pulled from TOML are emtpy + not var for var in [ + snowInstanceName, snowInstanceRegion, snowUserName, snowUserLoginBreachRate + ] + ): + logger.error(f"One of your ServiceNow TOML entries in [credentials.servicenow] is empty!") + sys.exit(2) + + # Retrieve ServiceNow ElectricEye user password + serviceNowPwVal = serviceNowValues["servicenow_sspm_password_value"] + if self.credentialsLocation == "CONFIG_FILE": + environ["SNOW_SSPM_PASSWORD"] = serviceNowPwVal + elif self.credentialsLocation == "AWS_SSM": + environ["SNOW_SSPM_PASSWORD"] = self.get_credential_from_aws_ssm( + serviceNowPwVal, + "servicenow_sspm_password_value" + ) + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + environ["SNOW_SSPM_PASSWORD"] = self.get_credential_from_aws_secrets_manager( + serviceNowPwVal, + "servicenow_sspm_password_value" + ) + # All other ServiceNow Values are written as environment variables and either provided + # to PySnow Clients or to ProductFields{} within the ASFF per Finding + environ["SNOW_INSTANCE_NAME"] = snowInstanceName + environ["SNOW_INSTANCE_REGION"] = snowInstanceRegion + environ["SNOW_SSPM_USERNAME"] = snowUserName + environ["SNOW_FAILED_LOGIN_BREACHING_RATE"] = snowUserLoginBreachRate + + # M365 + if assessmentTarget == "M365": + # Process data["credentials"]["m365"] - values need to be assigned to self + m365Values = data["credentials"]["m365"] + + m365ClientId = m365Values["m365_ent_app_client_id_value"] + m365SecretId = m365Values["m365_ent_app_client_secret_id_value"] + m365TenantId = m365Values["m365_ent_app_tenant_id_value"] + m365TenantLocation = m365Values["m365_tenant_location"] + + if any( + # Check to make sure none of the variables pulled from TOML are emtpy + not var for var in [ + m365ClientId, m365SecretId, m365TenantId, m365TenantLocation + ] + ): + logger.error(f"One of your M365 TOML entries in [credentials.m365] is empty!") + sys.exit(2) - # Retrieve the values for the Salesforce Client ID, Client Secret, Username, Password, and Security Token - # Local config file - if self.credentialsLocation == "CONFIG_FILE": - self.salesforceAppClientId = salesforceAppClientId - self.salesforceAppClientSecret = salesforceAppClientSecret - self.salesforceApiUsername = salesforceApiUsername - self.salesforceApiPassword = salesforceApiPassword - self.salesforceUserSecurityToken = salesforceUserSecurityToken - # SSM - elif self.credentialsLocation == "AWS_SSM": - # Client ID - self.salesforceAppClientId = self.get_credential_from_aws_ssm( - salesforceAppClientId, - 
"salesforce_connected_app_client_id_value" - ) - # Client Secret - self.salesforceAppClientSecret = self.get_credential_from_aws_ssm( - salesforceAppClientSecret, - "salesforce_connected_app_client_secret_value" - ) - # API Username - self.salesforceApiUsername = self.get_credential_from_aws_ssm( - salesforceApiUsername, - "salesforce_api_enabled_username_value" - ) - # API User Password - self.salesforceApiPassword = self.get_credential_from_aws_ssm( - salesforceApiPassword, - "salesforce_api_enabled_password_value" - ) - # API User Security Token - self.salesforceUserSecurityToken = self.get_credential_from_aws_ssm( - salesforceUserSecurityToken, - "salesforce_api_enabled_security_token_value" - ) - # AWS Secrets Manager - elif self.credentialsLocation == "AWS_SECRETS_MANAGER": - # Client ID - self.salesforceAppClientId = self.get_credential_from_aws_secrets_manager( - salesforceAppClientId, - "salesforce_connected_app_client_id_value" - ) - # Client Secret - self.salesforceAppClientSecret = self.get_credential_from_aws_secrets_manager( - salesforceAppClientSecret, - "salesforce_connected_app_client_secret_value" - ) - # API Username - self.salesforceApiUsername = self.get_credential_from_aws_secrets_manager( - salesforceApiUsername, - "salesforce_api_enabled_username_value" - ) - # API User Password - self.salesforceApiPassword = self.get_credential_from_aws_secrets_manager( - salesforceApiPassword, - "salesforce_api_enabled_password_value" - ) - # API User Security Token - self.salesforceUserSecurityToken = self.get_credential_from_aws_secrets_manager( - salesforceUserSecurityToken, - "salesforce_api_enabled_security_token_value" - ) + # This value (tenant location) will always be in plaintext + self.m365TenantLocation = m365TenantLocation + + # Retrieve the values for the M365 Enterprise Application Client ID, Secret Value & Tenant ID + if self.credentialsLocation == "CONFIG_FILE": + self.m365ClientId = m365ClientId + self.m365SecretId = m365SecretId + self.m365TenantId = m365TenantId + # SSM + elif self.credentialsLocation == "AWS_SSM": + # Client ID + self.m365ClientId = self.get_credential_from_aws_ssm( + m365ClientId, + "m365_ent_app_client_id_value" + ) + # Secret Value + self.m365SecretId = self.get_credential_from_aws_ssm( + m365SecretId, + "m365_ent_app_client_secret_id_value" + ) + # Tenant ID + self.m365TenantId = self.get_credential_from_aws_ssm( + m365TenantId, + "m365_ent_app_tenant_id_value" + ) + # AWS Secrets Manager + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + # Client ID + self.m365ClientId = self.get_credential_from_aws_secrets_manager( + m365ClientId, + "m365_ent_app_client_id_value" + ) + # Secret Value + self.m365SecretId = self.get_credential_from_aws_secrets_manager( + m365SecretId, + "m365_ent_app_client_secret_id_value" + ) + # Tenant ID + self.m365TenantId = self.get_credential_from_aws_secrets_manager( + m365TenantId, + "m365_ent_app_tenant_id_value" + ) + + # Salesforce + if assessmentTarget == "Salesforce": + # Process data["credentials"]["m365"] - values need to be assigned to self + salesforceValues = data["credentials"]["salesforce"] + + salesforceAppClientId = salesforceValues["salesforce_connected_app_client_id_value"] + salesforceAppClientSecret = salesforceValues["salesforce_connected_app_client_secret_value"] + salesforceApiUsername = salesforceValues["salesforce_api_enabled_username_value"] + salesforceApiPassword = salesforceValues["salesforce_api_enabled_password_value"] + salesforceUserSecurityToken = 
salesforceValues["salesforce_api_enabled_security_token_value"] + salesforceInstanceLocation = salesforceValues["salesforce_instance_location"] + salesforceFailedLoginBreachingRate = salesforceValues["salesforce_failed_login_breaching_rate"] + salesforceApiVersion = salesforceValues["salesforce_api_version"] + + if any( + # Check to make sure none of the variables pulled from TOML are emtpy + not var for var in [ + salesforceAppClientId, salesforceAppClientSecret, salesforceApiUsername, salesforceApiPassword, salesforceUserSecurityToken, salesforceInstanceLocation, salesforceFailedLoginBreachingRate, salesforceApiVersion + ] + ): + logger.error(f"One of your Salesforce TOML entries in [credentials.salesforce] is empty!") + sys.exit(2) - # Google Workspace - elif assessmentTarget == "GoogleWorkspace": - logger.info("Coming soon!") + # The failed login breaching rate and API Version will be in plaintext/env vars + environ["SALESFORCE_FAILED_LOGIN_BREACHING_RATE"] = salesforceFailedLoginBreachingRate + environ["SFDC_API_VERSION"] = salesforceApiVersion + + # Location is parsed from the config directly + self.salesforceInstanceLocation = salesforceInstanceLocation + + # Retrieve the values for the Salesforce Client ID, Client Secret, Username, Password, and Security Token + # Local config file + if self.credentialsLocation == "CONFIG_FILE": + self.salesforceAppClientId = salesforceAppClientId + self.salesforceAppClientSecret = salesforceAppClientSecret + self.salesforceApiUsername = salesforceApiUsername + self.salesforceApiPassword = salesforceApiPassword + self.salesforceUserSecurityToken = salesforceUserSecurityToken + # SSM + elif self.credentialsLocation == "AWS_SSM": + # Client ID + self.salesforceAppClientId = self.get_credential_from_aws_ssm( + salesforceAppClientId, + "salesforce_connected_app_client_id_value" + ) + # Client Secret + self.salesforceAppClientSecret = self.get_credential_from_aws_ssm( + salesforceAppClientSecret, + "salesforce_connected_app_client_secret_value" + ) + # API Username + self.salesforceApiUsername = self.get_credential_from_aws_ssm( + salesforceApiUsername, + "salesforce_api_enabled_username_value" + ) + # API User Password + self.salesforceApiPassword = self.get_credential_from_aws_ssm( + salesforceApiPassword, + "salesforce_api_enabled_password_value" + ) + # API User Security Token + self.salesforceUserSecurityToken = self.get_credential_from_aws_ssm( + salesforceUserSecurityToken, + "salesforce_api_enabled_security_token_value" + ) + # AWS Secrets Manager + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + # Client ID + self.salesforceAppClientId = self.get_credential_from_aws_secrets_manager( + salesforceAppClientId, + "salesforce_connected_app_client_id_value" + ) + # Client Secret + self.salesforceAppClientSecret = self.get_credential_from_aws_secrets_manager( + salesforceAppClientSecret, + "salesforce_connected_app_client_secret_value" + ) + # API Username + self.salesforceApiUsername = self.get_credential_from_aws_secrets_manager( + salesforceApiUsername, + "salesforce_api_enabled_username_value" + ) + # API User Password + self.salesforceApiPassword = self.get_credential_from_aws_secrets_manager( + salesforceApiPassword, + "salesforce_api_enabled_password_value" + ) + # API User Security Token + self.salesforceUserSecurityToken = self.get_credential_from_aws_secrets_manager( + salesforceUserSecurityToken, + "salesforce_api_enabled_security_token_value" + ) + + # Google Workspace + if assessmentTarget == "GoogleWorkspace": + 
logger.info("Coming soon!") + + # Snowflake + if assessmentTarget == "Snowflake": + # Process data["credentials"]["snowflake"] - values need to be assigned to self + snowflakeTomlValues = data["credentials"]["snowflake"] + + snowflakeUsername = str(snowflakeTomlValues["snowflake_username"]) + snowflakePasswordValue = str(snowflakeTomlValues["snowflake_password_value"]) + snowflakeAccountId = str(snowflakeTomlValues["snowflake_account_id"]) + snowflakeWarehouseName = str(snowflakeTomlValues["snowflake_warehouse_name"]) + snowflakeRegion = str(snowflakeTomlValues["snowflake_region"]) + serviceAccountExemptions = list(snowflakeTomlValues["snowflake_service_account_usernames"]) + + if any( + # Check to make sure none of the variables pulled from TOML are emtpy + not var for var in [ + snowflakeUsername, snowflakePasswordValue, snowflakeAccountId, snowflakeWarehouseName, snowflakeRegion + ] + ): + logger.error(f"One of your Snowflake TOML entries in [credentials.snowflake] is empty!") + sys.exit(2) - # Snowflake - elif assessmentTarget == "Snowflake": - logger.info("Coming soon!") + # Parse non-confidential values to environ + self.snowflakeUsername = snowflakeUsername + self.snowflakeAccountId = snowflakeAccountId + self.snowflakeWarehouseName = snowflakeWarehouseName + self.snowflakeRegion = snowflakeRegion + self.serviceAccountExemptions = serviceAccountExemptions + + # Retrieve value for Snowflake Password from the TOML, AWS SSM or AWS Secrets Manager + if self.credentialsLocation == "CONFIG_FILE": + self.snowflakePassowrd = snowflakePasswordValue + # SSM + elif self.credentialsLocation == "AWS_SSM": + self.snowflakePassowrd = self.get_credential_from_aws_ssm( + snowflakePasswordValue, + "snowflake_password_value" + ) + # AWS Secrets Manager + elif self.credentialsLocation == "AWS_SECRETS_MANAGER": + self.snowflakePassowrd = self.get_credential_from_aws_secrets_manager( + snowflakePasswordValue, + "snowflake_password_value" + ) + + # Retrieve cursor and connector + snowflakeCursorConn = self.create_snowflake_cursor() + + self.snowflakeConnection = snowflakeCursorConn[0] + self.snowflakeCursor = snowflakeCursorConn[1] + + # Non-TOML Args + if useToml == "False": + self.process_non_toml_args(assessmentTarget, args) def get_aws_regions(self): """ @@ -515,11 +568,13 @@ def get_aws_regions(self): return regions - def get_credential_from_aws_ssm(self, value, configurationName): + def get_credential_from_aws_ssm(self, value, configurationName) -> str: """ Retrieves a TOML variable from AWS Systems Manager Parameter Store and returns it """ + ssm = boto3.client("ssm") + if value is None or value == "": logger.error( "A value for %s was not provided. Fix the TOML file and run ElectricEye again.", @@ -542,10 +597,12 @@ def get_credential_from_aws_ssm(self, value, configurationName): return credential - def get_credential_from_aws_secrets_manager(self, value, configurationName): + def get_credential_from_aws_secrets_manager(self, value, configurationName) -> str: """ Retrieves a TOML variable from AWS Secrets Manager and returns it """ + asm = boto3.client("secretsmanager") + if value is None or value == "": logger.error( "A value for %s was not provided. 
Fix the TOML file and run ElectricEye again.", @@ -564,10 +621,12 @@ def get_credential_from_aws_secrets_manager(self, value, configurationName): return credential - def get_aws_accounts_from_organization(self): + def get_aws_accounts_from_organization(self) -> list[str]: """ Uses Organizations ListAccounts API to get a list of "ACTIVE" AWS Accounts in the entire Organization """ + org = boto3.client("organizations") + try: accounts = [account["Id"] for account in org.list_accounts()["Accounts"] if account["Status"] == "ACTIVE"] except ClientError as e: @@ -578,10 +637,13 @@ def get_aws_accounts_from_organization(self): return accounts - def get_aws_accounts_from_organizational_units(self, targets): + def get_aws_accounts_from_organizational_units(self, targets) -> list[str]: """ Uses Organizations ListAccountsForParent API to get a list of "ACTIVE" AWS Accounts for specified OUs """ + sts = boto3.client("sts") + org = boto3.client("organizations") + accounts = [sts.get_caller_identity()["Account"]] # Caller account is added directly. for parent in targets: @@ -605,6 +667,8 @@ def create_aws_session(account: str, partition: str, region: str, roleName: str) """ crossAccountRoleArn = f"arn:{partition}:iam::{account}:role/{roleName}" + sts = boto3.client("sts") + try: memberAcct = sts.assume_role( RoleArn=crossAccountRoleArn, @@ -628,7 +692,7 @@ def create_aws_session(account: str, partition: str, region: str, roleName: str) return session # This function is called outside of this Class and from create_aws_session() - def check_aws_partition(region: str): + def check_aws_partition(region: str) -> str: """ Returns the AWS Partition based on the current Region of a Session """ @@ -658,7 +722,7 @@ def check_aws_partition(region: str): return partition # This function is called outside of this Class - def get_aws_support_eligibility(session): + def get_aws_support_eligibility(session) -> bool: support = session.client("support") try: @@ -676,7 +740,7 @@ def get_aws_support_eligibility(session): return supportEligible # This function is called outside of this Class - def get_aws_shield_advanced_eligibility(session): + def get_aws_shield_advanced_eligibility(session) -> bool: shield = session.client("shield") try: @@ -693,7 +757,7 @@ def get_aws_shield_advanced_eligibility(session): return shieldEligible - def setup_gcp_credentials(self, credentialValue): + def setup_gcp_credentials(self, credentialValue) -> None: """ The Python Google Client SDK defaults to checking for credentials in the "GOOGLE_APPLICATION_CREDENTIALS" environment variable. This can be the location of a GCP Service Account (SA) Key which is stored in a JSON file. 
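For context, the hunks above move the boto3 client construction inside the credential helpers. A minimal sketch of the retrieval pattern they implement, assuming the helper simply exits on a missing value (the actual bodies are elided by the diff context, and the function name here is illustrative):

import sys
import logging

import boto3

logger = logging.getLogger("EEAuditor")

def resolve_ssm_credential(value: str, configurationName: str) -> str:
    # Client is created inside the helper, mirroring this diff
    ssm = boto3.client("ssm")
    if value is None or value == "":
        logger.error(
            "A value for %s was not provided. Fix the TOML file and run ElectricEye again.",
            configurationName
        )
        sys.exit(2)
    # WithDecryption=True returns the plaintext of a SecureString Parameter;
    # the TOML entry is treated as the Parameter name, not the secret itself
    return ssm.get_parameter(Name=value, WithDecryption=True)["Parameter"]["Value"]

The Secrets Manager path is analogous, reading the secret via asm.get_secret_value(SecretId=value)["SecretString"].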
@@ -721,7 +785,7 @@ def setup_gcp_credentials(self, credentialValue): logger.info("%s saved to environment variable", credentials_file_path) environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials_file_path - def setup_oci_credentials(self, credentialValue): + def setup_oci_credentials(self, credentialValue) -> None: """ Oracle Cloud Python SDK Config object can be created and requires the path to a PEM file, we can save the PEM contents to a file and save the location to an environment variable to be used @@ -771,5 +835,138 @@ def retrieve_azure_subscriptions_for_service_principal(self, azureCredentials: C sys.exit(2) return azureSubscriptionIds + + def create_snowflake_cursor(self) -> tuple[snowconn.connection.SnowflakeConnection, snowconn.cursor.SnowflakeCursor]: + """ + Returns a Snowflake connection and cursor for a given warehouse + """ + try: + conn = snowconn.connect( + user=self.snowflakeUsername, + password=self.snowflakePassword, + account=self.snowflakeAccountId, + warehouse=self.snowflakeWarehouseName + ) + except Exception as e: + raise e + + logger.info("Connected to Snowflake successfully.") + # This allows us to return a dictionary instead of tuples + cur = conn.cursor(snowconn.DictCursor) + + # Use the warehouse provided, this is a required step if a custom role is used to catch if the custom role was not given a grant to the warehouse + try: + war = cur.execute(f"use warehouse {self.snowflakeWarehouseName}").fetchall() + logger.info("Using warehouse %s. %s", self.snowflakeWarehouseName, war) + except snowconn.errors.ProgrammingError as e: + logger.error( + "Failed to use warehouse %s: %s", + self.snowflakeWarehouseName, e + ) + raise e + + return conn, cur + + def process_non_toml_args(self, assessmentTarget: str, args: dict) -> None: + """ + Process any additional arguments passed to the script that are not in the TOML file + """ + # First, process out the credentialsLocation arg ["AWS_SSM", "AWS_SECRETS_MANAGER", "CONFIG_FILE"] + try: + self.credentialsLocation = args["credentials_location"] + except KeyError as ke: + logger.error( + "The credentials_location argument was not provided: %s", ke + ) + sys.exit(2) + # AWS + if assessmentTarget == "AWS": + sts = boto3.client("sts") + # First process the global "aws_multi_account_target_type" and "aws_account_targets" args + try: + awsMultiAccountTargetType = str(args["aws_multi_account_target_type"]) + awsAccountTargets = list(args.get("aws_account_targets") or []) + awsRegionsSelection = list(args.get("aws_regions_selection") or []) + electricEyeRoleName = args.get("aws_electric_eye_iam_role_name") + except KeyError as ke: + logger.error( + "One of the required global AWS arguments was not provided: %s", ke + ) + sys.exit(2) + # Process account targets based on the multi-account target type + if awsMultiAccountTargetType == "Accounts": + if not awsAccountTargets: + self.awsAccountTargets = [sts.get_caller_identity()["Account"]] + else: + self.awsAccountTargets = awsAccountTargets + if awsMultiAccountTargetType == "OU": + if not awsAccountTargets: + logger.error("OU was specified but targets were not specified.") + sys.exit(2) + # Regex to check for Valid OUs + ouIdRegex = compile(r"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$") + for ou in awsAccountTargets: + if not ouIdRegex.match(ou): + logger.error(f"Invalid Organizational Unit ID {ou}.") + sys.exit(2) + self.awsAccountTargets = self.get_aws_accounts_from_organizational_units(awsAccountTargets) + if awsMultiAccountTargetType == "Organization": + self.awsAccountTargets = 
self.get_aws_accounts_from_organization() + + # Process aws_regions_selection + awsRegions = self.get_aws_regions() + if not awsRegionsSelection: + self.awsRegionsSelection = [boto3.Session().region_name] + else: + if "All" in awsRegionsSelection or "all" in awsRegionsSelection: + self.awsRegionsSelection = awsRegions + else: + # Validation check + self.awsRegionsSelection = [a for a in awsRegionsSelection if a in awsRegions] + # Process ["aws_electric_eye_iam_role_name"] + if electricEyeRoleName is None or electricEyeRoleName == "": + logger.warning( + "A value for ['aws_electric_eye_iam_role_name'] was not provided. Will attempt to use current session credentials, this will likely fail if you're attempting to assess another AWS account." + ) + self.electricEyeRoleName = None + else: + self.electricEyeRoleName = electricEyeRoleName + + # Snowflake + if assessmentTarget == "Snowflake": + try: + self.snowflakeUsername = str(args["snowflake_username"]) + self.snowflakePasswordValue = str(args["snowflake_password_value"]) + self.snowflakeAccountId = str(args["snowflake_account_id"]) + self.snowflakeWarehouseName = str(args["snowflake_warehouse_name"]) + self.snowflakeRegion = str(args["snowflake_region"]) + self.serviceAccountExemptions = list(args.get("snowflake_service_account_usernames") or []) + except KeyError as ke: + logger.error( + "One of the required Snowflake arguments was not provided: %s", ke + ) + sys.exit(2) + + # Retrieve value for Snowflake Password from the args, AWS SSM or AWS Secrets Manager + if self.credentialsLocation == "CONFIG_FILE": + self.snowflakePassword = self.snowflakePasswordValue + # SSM + if self.credentialsLocation == "AWS_SSM": + self.snowflakePassword = self.get_credential_from_aws_ssm( + self.snowflakePasswordValue, + "snowflake_password_value" + ) + # AWS Secrets Manager + if self.credentialsLocation == "AWS_SECRETS_MANAGER": + self.snowflakePassword = self.get_credential_from_aws_secrets_manager( + self.snowflakePasswordValue, + "snowflake_password_value" + ) + + # Setup Cursor and Connector + snowflakeCursorConn = self.create_snowflake_cursor() + + self.snowflakeConnection = snowflakeCursorConn[0] + self.snowflakeCursor = snowflakeCursorConn[1] ## EOF \ No newline at end of file diff --git a/eeauditor/controller.py b/eeauditor/controller.py index 7baab1df..88d698c0 100644 --- a/eeauditor/controller.py +++ b/eeauditor/controller.py @@ -20,46 +20,56 @@ import sys import click -from insights import create_sechub_insights from eeauditor import EEAuditor from processor.main import get_providers, process_findings from os import environ -def print_controls(assessmentTarget, auditorName=None): - app = EEAuditor(assessmentTarget) +def print_controls(assessmentTarget, args, useToml, auditorName=None, tomlPath=None): + app = EEAuditor(assessmentTarget, args, useToml, tomlPath) app.load_plugins(auditorName) app.print_controls_json() -def print_checks(assessmentTarget, auditorName=None): - app = EEAuditor(assessmentTarget) +def print_checks(assessmentTarget, args, useToml, auditorName=None, tomlPath=None): + app = EEAuditor(assessmentTarget, args, useToml, tomlPath) app.load_plugins(auditorName) app.print_checks_md() -def run_auditor(assessmentTarget, auditorName=None, pluginName=None, delay=0, outputs=None, outputFile="", tomlPath=None): +def run_auditor(assessmentTarget, args, useToml, auditorName=None, pluginName=None, delay=0, outputs=None, outputFile="", tomlPath=None): if not outputs: outputs = ["stdout"] - app = EEAuditor(assessmentTarget, tomlPath) 
+ app = EEAuditor(assessmentTarget, args, useToml, tomlPath) app.load_plugins(auditorName) # Per-target calls - ensure you use the right run_*_checks*() function + + # Amazon Web Services if assessmentTarget == "AWS": findings = list(app.run_aws_checks(pluginName=pluginName, delay=delay)) - elif assessmentTarget == "GCP": + # Google Cloud Platform + if assessmentTarget == "GCP": findings = list(app.run_gcp_checks(pluginName=pluginName, delay=delay)) - elif assessmentTarget == "OCI": + # Oracle Cloud Infrastructure + if assessmentTarget == "OCI": findings = list(app.run_oci_checks(pluginName=pluginName, delay=delay)) - elif assessmentTarget == "Azure": + # Microsoft Azure + if assessmentTarget == "Azure": findings = list(app.run_azure_checks(pluginName=pluginName, delay=delay)) - elif assessmentTarget == "M365": + # Microsoft 365 + if assessmentTarget == "M365": findings = list(app.run_m365_checks(pluginName=pluginName, delay=delay)) - elif assessmentTarget == "Salesforce": + # Salesforce + if assessmentTarget == "Salesforce": findings = list(app.run_salesforce_checks(pluginName=pluginName, delay=delay)) - else: + # Snowflake + if assessmentTarget == "Snowflake": + findings = list(app.run_snowflake_checks(pluginName=pluginName, delay=delay)) + # ServiceNow + if assessmentTarget == "Servicenow": findings = list(app.run_non_aws_checks(pluginName=pluginName, delay=delay)) print(f"Done running Checks for {assessmentTarget}") @@ -90,31 +100,32 @@ def run_auditor(assessmentTarget, auditorName=None, pluginName=None, delay=0, ou "GCP", "Servicenow", "M365", - "Salesforce" + "Salesforce", + "Snowflake" ], case_sensitive=True ), - help="CSP or SaaS Vendor Assessment Target, ensure that any -a or -c arg maps to your target provider e.g., -t AWS -a Amazon_APGIW_Auditor" + help="Public cloud or SaaS assessment target, ensure that any -a or -c arg maps to your target provider to avoid any errors. e.g., -t AWS -a Amazon_APGIW_Auditor" ) # Run Specific Auditor @click.option( "-a", "--auditor-name", default="", - help="Specify which Auditor you want to run by using its name NOT INCLUDING .py. Defaults to ALL Auditors" + help="Specify which Auditor you want to run by using its name NOT INCLUDING .py. Use the --list-checks arg to receive a list. Defaults to ALL Auditors" ) # Run Specific Check @click.option( "-c", "--check-name", default="", - help="A specific Check in a specific Auditor you want to run, this correlates to the function name. Defaults to ALL Checks") + help="A specific Check in a specific Auditor you want to run, this correlates to the function name. Use the --list-checks arg to receive a list. Defaults to ALL Checks") # Delay @click.option( "-d", "--delay", default=0, - help="Time in seconds to sleep between Auditors being ran, defaults to 0" + help="Time in seconds to sleep between Auditors being run, defaults to 0. 
Use this argument to avoid rate limiting" ) # Outputs @click.option( @@ -127,6 +138,7 @@ def run_auditor(assessmentTarget, auditorName=None, pluginName=None, delay=0, ou ) # Output File Name @click.option( + "-of", "--output-file", default="output", show_default=True, @@ -134,34 +146,52 @@ def run_auditor(assessmentTarget, auditorName=None, pluginName=None, delay=0, ou ) # List Output Options @click.option( + "-lo", "--list-options", is_flag=True, help="Lists all valid Output options" ) # List Checks @click.option( + "-lch", "--list-checks", is_flag=True, - help="Prints a table of Auditors, Checks, and Check descriptions to stdout - use this for -a or -c args" -) -# Insights -@click.option( - "--create-insights", - is_flag=True, - help="Create AWS Security Hub Insights for ElectricEye. This only needs to be done once per Account per Region for Security Hub", + help="Prints a table of Auditors, Checks, and Check descriptions to stdout - use this command for help with populating -a (Auditor selection) or -c (Check selection) args" ) # Controls (Description) @click.option( + "-lco", "--list-controls", is_flag=True, - help="Lists all ElectricEye Controls (e.g. Check Titles) for an Assessment Target" + help="Lists all ElectricEye controls - that is to say: the Check Titles - for an Assessment Target" ) # TOML Path @click.option( + "-tp", "--toml-path", default=None, help="The full path to the TOML file used for configuration e.g., ~/path/to/mydir/external_providers.toml. If this value is not provided the default path of ElectricEye/eeauditor/external_providers.toml is used." ) +# Use TOML +@click.option( + "-ut", + "--use-toml", + default="True", + type=click.Choice( + [ + "True", + "False" + ], + case_sensitive=True + ), + help="Set to False to disable the use of the TOML file for external providers, defaults to True. THIS IS AN EXPERIMENTAL FEATURE!" +) +# EXPERIMENTAL: Supply arguments in a stringified dictionary format +@click.option( + "--args", + default=None, + help="Supply arguments in a stringified dictionary format, e.g., '{\"credentials_location\": \"CONFIG_FILE\", \"snowflake_username\": \"ELECTRIC_EYE\"}'. THIS IS AN EXPERIMENTAL FEATURE!" 
+) def main( target_provider, @@ -172,13 +202,17 @@ def main( output_file, list_options, list_checks, - create_insights, list_controls, - toml_path + toml_path, + use_toml, + args ): if list_controls: print_controls( - assessmentTarget=target_provider + assessmentTarget=target_provider, + args=args, + tomlPath=toml_path, + useToml=use_toml, ) sys.exit(0) @@ -192,23 +226,26 @@ def main( if list_checks: print_checks( - assessmentTarget=target_provider + assessmentTarget=target_provider, + args=args, + tomlPath=toml_path, + useToml=use_toml, ) sys.exit(0) - if create_insights: - create_sechub_insights() - sys.exit(0) - run_auditor( assessmentTarget=target_provider, + args=args, auditorName=auditor_name, pluginName=check_name, delay=delay, outputs=outputs, outputFile=output_file, - tomlPath=toml_path + tomlPath=toml_path, + useToml=use_toml ) if __name__ == "__main__": - main(sys.argv[1:]) \ No newline at end of file + main(sys.argv[1:]) + +# EOF \ No newline at end of file diff --git a/eeauditor/eeauditor.py b/eeauditor/eeauditor.py index 12bc0761..c808a9dc 100644 --- a/eeauditor/eeauditor.py +++ b/eeauditor/eeauditor.py @@ -22,15 +22,14 @@ from os import path from functools import partial from inspect import getfile -import sys from time import sleep -from traceback import format_exc import json from requests import get from check_register import CheckRegister from cloud_utils import CloudConfig from pluginbase import PluginBase +logging.basicConfig(level=logging.INFO) logger = logging.getLogger("EEAuditor") here = path.abspath(path.dirname(__file__)) @@ -42,9 +41,8 @@ class EEAuditor(object): credentials and cross-boundary configurations, and runs Checks and yields results back to controller.py CLI """ - def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): - # each check must be decorated with the @registry.register_check("cache_name") - # to be discovered during plugin loading. + def __init__(self, assessmentTarget, args, useToml, tomlPath=None, searchPath=None): + # each check must be decorated with the @registry.register_check("cache_name") to be discovered during plugin loading. 
self.registry = CheckRegister() self.name = assessmentTarget self.plugin_base = PluginBase(package="electriceye") @@ -54,7 +52,7 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # AWS if assessmentTarget == "AWS": searchPath = "./auditors/aws" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.awsAccountTargets = utils.awsAccountTargets self.awsRegionsSelection = utils.awsRegionsSelection @@ -62,13 +60,13 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # GCP if assessmentTarget == "GCP": searchPath = "./auditors/gcp" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.gcpProjectIds = utils.gcp_project_ids # OCI if assessmentTarget == "OCI": searchPath = "./auditors/oci" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.ociTenancyId = utils.ociTenancyId self.ociUserId = utils.ociUserId @@ -78,14 +76,14 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # Azure if assessmentTarget == "Azure": searchPath = "./auditors/azure" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.azureSubscriptions = utils.azureSubscriptions self.azureCredentials = utils.azureCredentials # Alibaba if assessmentTarget == "Alibaba": searchPath = "./auditors/alibabacloud" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) ################################### # SOFTWARE-AS-A-SERVICE PROVIDERS # @@ -93,11 +91,11 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # Servicenow if assessmentTarget == "Servicenow": searchPath = "./auditors/servicenow" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # M365 if assessmentTarget == "M365": searchPath = "./auditors/m365" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.m365TenantLocation = utils.m365TenantLocation self.m365ClientId = utils.m365ClientId @@ -106,7 +104,8 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # Salesforce if assessmentTarget == "Salesforce": searchPath = "./auditors/salesforce" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) + # parse specific values for Assessment Target - these should match 1:1 with CloudConfig self.salesforceAppClientId = utils.salesforceAppClientId self.salesforceAppClientSecret = utils.salesforceAppClientSecret self.salesforceApiUsername = utils.salesforceApiUsername @@ -116,11 +115,17 @@ def __init__(self, assessmentTarget, tomlPath=None, searchPath=None): # Snowflake if assessmentTarget == "Snowflake": searchPath = "./auditors/snowflake" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) 
+ # parse specific values for Assessment Target - these should match 1:1 with CloudConfig + self.snowflakeAccountId = utils.snowflakeAccountId + self.snowflakeRegion = utils.snowflakeRegion + self.snowflakeCursor = utils.snowflakeCursor + self.snowflakeConnection = utils.snowflakeConnection + self.serviceAccountExemptions = utils.serviceAccountExemptions # Google Workspace if assessmentTarget == "GoogleWorkspace": searchPath = "./auditors/google_workspace" - utils = CloudConfig(assessmentTarget, tomlPath) + utils = CloudConfig(assessmentTarget, tomlPath, useToml, args) # Search path for Auditors self.source = self.plugin_base.make_plugin_source( @@ -221,6 +226,7 @@ def run_aws_checks(self, pluginName=None, delay=0): """ Runs AWS Auditors across all TOML-specified Accounts and Regions in a specific Partition """ + import boto3 # "Global" Auditors that should only need to be ran once per Account globalAuditors = ["cloudfront", "globalaccelerator", "iam", "health", "support", "account", "s3"] @@ -233,28 +239,36 @@ ) for account in self.awsAccountTargets: - # This list will contain the "global" services so they're not run multiple times globalAuditorsCompleted = [] for region in self.awsRegionsSelection: + # Derive the Partition ID from the AWS Region - needed for ASFF & service availability checks + partition = CloudConfig.check_aws_partition(region) + # attempt to use current session creds + if self.electricEyeRoleName is None or self.electricEyeRoleName == "": + session = boto3.Session(region_name=region) + logger.info( + "Using current session credentials for Account %s in region %s", + account, region + ) + # Setup Boto3 Session with STS AssumeRole + else: + session = CloudConfig.create_aws_session( + account, + partition, + region, + self.electricEyeRoleName + ) + logger.info( + "Using STS AssumeRole credentials for Account %s in region %s", + account, region + ) + for serviceName, checkList in self.registry.checks.items(): # Pass the Cache at the "serviceName" level aka Plugin auditorCache = {} - # Dervice the Partition ID from the AWS Region - needed for ASFF & service availability checks - partition = CloudConfig.check_aws_partition(region) - # Setup Boto3 Session with STS AssumeRole - if self.electricEyeRoleName is not None: - session = CloudConfig.create_aws_session( - account, - partition, - region, - self.electricEyeRoleName - ) - # attempt to use current session creds - else: - import boto3 - session = boto3.Session(region_name=region) + # Check service availability, not always accurate if self.check_service_endpoint_availability(endpointData, partition, serviceName, region) is False: logger.info( @@ -304,22 +318,23 @@ ): try: logger.info( - "Executing Check %s for Account %s in region %s", + "Executing AWS Check %s for Account %s in region %s", checkName, account, region ) + for finding in check( cache=auditorCache, session=session, awsAccountId=account, awsRegion=region, - awsPartition=partition, + awsPartition=partition ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -360,10 +375,10 @@ def run_gcp_checks(self, pluginName=None, delay=0): ): if finding is not None: yield finding - except Exception: - 
logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -378,6 +393,8 @@ def run_oci_checks(self, pluginName=None, delay=0): account = "000000000000" partition = "not-aws" + logger.info("Oracle Cloud Infrastructure assessment has started.") + for serviceName, checkList in self.registry.checks.items(): # Pass the Cache at the "serviceName" level aka Plugin auditorCache = {} @@ -406,10 +423,10 @@ def run_oci_checks(self, pluginName=None, delay=0): ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -424,6 +441,8 @@ def run_azure_checks(self, pluginName=None, delay=0): account = "000000000000" partition = "not-aws" + logger.info("Microsoft Azure assessment has started.") + for azSubId in self.azureSubscriptions: for serviceName, checkList in self.registry.checks.items(): # Pass the Cache at the "serviceName" level aka Plugin @@ -450,10 +469,10 @@ def run_azure_checks(self, pluginName=None, delay=0): ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -468,6 +487,8 @@ def run_m365_checks(self, pluginName=None, delay=0): account = "000000000000" partition = "not-aws" + logger.info("M365 assessment has started.") + for serviceName, checkList in self.registry.checks.items(): # Pass the Cache at the "serviceName" level aka Plugin auditorCache = {} @@ -495,10 +516,10 @@ def run_m365_checks(self, pluginName=None, delay=0): ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -514,6 +535,8 @@ def run_salesforce_checks(self, pluginName=None, delay=0): account = "000000000000" partition = "not-aws" + logger.info("Salesforce assessment has started.") + for serviceName, checkList in self.registry.checks.items(): # Pass the Cache at the "serviceName" level aka Plugin auditorCache = {} @@ -526,7 +549,7 @@ def run_salesforce_checks(self, pluginName=None, delay=0): ): try: logger.info( - "Executing Check %s for Salesforce instance", + "Executing Check %s for Salesforce", checkName ) for finding in check( @@ -543,14 +566,70 @@ def run_salesforce_checks(self, pluginName=None, delay=0): ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) + # Called from eeauditor/controller.py run_auditor() + def run_snowflake_checks(self, pluginName=None, delay=0): + """ + Runs Snowflake Auditors using Username and 
Password for a given Warehouse + """ + # hardcode the region and account for non-AWS checks + region = "us-placeholder-1" + account = "000000000000" + partition = "not-aws" + + logger.info("Snowflake assessment has started.") + + for serviceName, checkList in self.registry.checks.items(): + # Pass the Cache at the "serviceName" level aka Plugin + auditorCache = {} + for checkName, check in checkList.items(): + # if a specific check is requested, only run that one check + if ( + not pluginName + or pluginName + and pluginName == checkName + ): + try: + logger.info( + "Executing Check %s for Snowflake", + checkName + ) + for finding in check( + cache=auditorCache, + awsAccountId=account, + awsRegion=region, + awsPartition=partition, + snowflakeAccountId=self.snowflakeAccountId, + snowflakeRegion=self.snowflakeRegion, + snowflakeCursor=self.snowflakeCursor, + serviceAccountExemptions=self.serviceAccountExemptions + ): + if finding is not None: + yield finding + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e + ) + # optional sleep if specified - defaults to 0 seconds + sleep(delay) + + # close the connection to the Snowflake Warehouse + curClose = self.snowflakeCursor.close() + connClose = self.snowflakeConnection.close() + + if curClose is True and connClose is None: + logger.info("Snowflake connection and cursor closed.") + else: + logger.warning("Failed to close Snowflake connection and/or cursor.") # Called from eeauditor/controller.py run_auditor() def run_non_aws_checks(self, pluginName=None, delay=0): """ @@ -584,10 +663,10 @@ ): if finding is not None: yield finding - except Exception: - logger.warn( - "Failed to execute check %s with traceback %s", - checkName, format_exc() + except Exception as e: + logger.warning( + "Failed to execute check %s with exception: %s", + checkName, e ) # optional sleep if specified - defaults to 0 seconds sleep(delay) @@ -630,5 +709,5 @@ def print_controls_json(self): controlPrinter.append(description) print(json.dumps(controlPrinter,indent=4)) - + # EOF \ No newline at end of file diff --git a/eeauditor/external_providers.toml b/eeauditor/external_providers.toml index 0e174258..cfbe1a55 100644 --- a/eeauditor/external_providers.toml +++ b/eeauditor/external_providers.toml @@ -232,6 +232,34 @@ title = "ElectricEye Configuration" salesforce_api_version = "v58.0" + [credentials.snowflake] + + # Username for your Snowflake Account, this should be a user with the ability to read all tables and views in the default schemas + + snowflake_username = "" + + # The location (or actual contents) of the Password for the User specified in `snowflake_username` + # this location must match the value of `global.credentials_location` e.g., if you specify "AWS_SSM" then + # the value for this variable should be the name of the AWS Systems Manager Parameter Store SecureString Parameter + + snowflake_password_value = "" + + # The Account ID for your Snowflake Account, this is found in the URL when you login to your Snowflake Account, e.g., VULEDAR-MR69420 + + snowflake_account_id = "" + + # The name of the warehouse you use for querying data in Snowflake, this should be a warehouse that has the ability to run queries + + snowflake_warehouse_name = "" + + # The Region of your Snowflake Account, this is found in the URL when you login to your Snowflake Account, e.g., us-east-1 + + snowflake_region = "" + + # OPTIONAL! 
The Usernames of "Service Accounts" created in Snowflake, this will optionally exempt these Usernames from being audited against the following checks: snowflake_password_assigned_user_has_mfa_check, snowflake_accountadmins_have_email_check, snowflake_admin_default_role_check, snowflake_logins_without_mfa_check, snowflake_bypass_mfa_review_check + + snowflake_service_account_usernames = [] + [outputs] # ***IMPORTANT*** @@ -272,20 +300,6 @@ title = "ElectricEye Configuration" postgresql_port = 5432 - [outputs.firemon_cloud_defense] # This takes place of the former DisruptOps ("dops") values, but the integration is largely the same - - # Client ID for your Firemon Cloud Defense (formerly known as DisruptOps (dops)) tenant - this location must match - # the value of `global. credentials_location` e.g., if you specify "AWS_SSM" then the value for this variable - # should be the name of the AWS Systems Manager Parameter Store SecureString Parameter - - firemon_cloud_defense_client_id_value = "" - - # API Key for your Firemon Cloud Defense (formerly known as DisruptOps (dops)) tenant - this location must match - # the value of `global. credentials_location` e.g., if you specify "AWS_SSM" then the value for this variable - # should be the name of the AWS Systems Manager Parameter Store SecureString Parameter - - firemon_cloud_defense_api_key_value = "" - [outputs.mongodb] # This unifies the old "docdb" output to account for local MongoDB and AWS DocumentDB # This value indicates whether or not you are using a password for your MongoDB deployment (which you should). If @@ -345,7 +359,7 @@ title = "ElectricEye Configuration" # Batch Size - amazon_sqs_batch_size = 10 # This must be an integer + amazon_sqs_batch_size = 1000 # This must be an integer # Queue Region diff --git a/eeauditor/insights.py b/eeauditor/insights.py deleted file mode 100644 index 0975845d..00000000 --- a/eeauditor/insights.py +++ /dev/null @@ -1,86 +0,0 @@ -#This file is part of ElectricEye. -#SPDX-License-Identifier: Apache-2.0 - -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at - -#http://www.apache.org/licenses/LICENSE-2.0 - -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
- -import boto3 - -def create_sechub_insights(): - securityhub = boto3.client("securityhub") - - try: - activeInsight = securityhub.create_insight( - Name="ElectricEye Active Findings", - Filters={ - "ProductFields": [ - {"Key": "ProductName", "Value": "ElectricEye", "Comparison": "EQUALS"}, - ], - "RecordState": [{"Value": "ACTIVE", "Comparison": "EQUALS"}] - }, - GroupByAttribute="ResourceType" - ) - print(activeInsight) - except Exception as e: - print(e) - - try: - remediatedInsight = securityhub.create_insight( - Name="ElectricEye Remediated Findings", - Filters={ - "ProductFields": [ - {"Key": "ProductName", "Value": "ElectricEye", "Comparison": "EQUALS"}, - ], - "RecordState": [{"Value": "ARCHIVED", "Comparison": "EQUALS"}] - }, - GroupByAttribute="ResourceType" - ) - print(remediatedInsight) - except Exception as e: - print(e) - - try: - shodanInsight = securityhub.create_insight( - Name="ElectricEye Shodan Findings", - Filters={ - "ProductFields": [ - {"Key": "ProductName", "Value": "ElectricEye", "Comparison": "EQUALS"}, - ], - "ThreatIntelIndicatorSource": [{"Value": "Shodan.io", "Comparison": "EQUALS"}], - "RecordState": [{"Value": "ACTIVE", "Comparison": "EQUALS"}] - }, - GroupByAttribute="ResourceType" - ) - print(shodanInsight) - except Exception as e: - print(e) - - try: - easmInsight = securityhub.create_insight( - Name="ElectricEye EASM", - Filters={ - "ProductFields": [ - {"Key": "ProductName", "Value": "ElectricEye", "Comparison": "EQUALS"}, - ], - "Title": [{"Value": "[AttackSurface", "Comparison": "CONTAINS"}], - "RecordState": [{"Value": "ACTIVE", "Comparison": "EQUALS"}] - }, - GroupByAttribute="ResourceType" - ) - print(easmInsight) - except Exception as e: - print(e) \ No newline at end of file diff --git a/eeauditor/processor/outputs/control_objectives.json b/eeauditor/processor/outputs/control_objectives.json index 04446e78..61c933e0 100644 --- a/eeauditor/processor/outputs/control_objectives.json +++ b/eeauditor/processor/outputs/control_objectives.json @@ -409,7 +409,7 @@ }, { "ControlTitle":"CIS Critical Security Controls V8 13.5", - "ControlDescription":"Manage Access Control for Remote Assets: Manage access control for assets remotely connecting to enterprise resources. Determine amount of access to enterprise resources based on: up-to-date anti-malware software installed, configuration compliance with the enterprise's secure configuration process, and ensuring the operating system and applications are up-to-date.\t " + "ControlDescription":"Manage Access Control for Remote Assets: Manage access control for assets remotely connecting to enterprise resources. Determine amount of access to enterprise resources based on: up-to-date anti-malware software installed, configuration compliance with the enterprise's secure configuration process, and ensuring the operating system and applications are up-to-date.\t" }, { "ControlTitle":"CIS Critical Security Controls V8 13.6", @@ -937,7 +937,7 @@ }, { "ControlTitle":"NIST SP 800-53 Rev. 5 RA-5", - "ControlDescription":"Vulnerability Monitoring and Scanning: a. Monitor and scan for vulnerabilities in the system and hosted applications [Assignment: organization-defined frequency and/or randomly in accordance with organization-defined process] and when new vulnerabilities potentially affecting the system are identified and reported;, b. Employ vulnerability monitoring tools and techniques that facilitate interoperability among tools and automate parts of the vulnerability management process by using standards for:, 1. 
Enumerating platforms, software flaws, and improper configurations;, 2. Formatting checklists and test procedures; and, 3. Measuring vulnerability impact; " + "ControlDescription":"Vulnerability Monitoring and Scanning: a. Monitor and scan for vulnerabilities in the system and hosted applications [Assignment: organization-defined frequency and/or randomly in accordance with organization-defined process] and when new vulnerabilities potentially affecting the system are identified and reported;, b. Employ vulnerability monitoring tools and techniques that facilitate interoperability among tools and automate parts of the vulnerability management process by using standards for:, 1. Enumerating platforms, software flaws, and improper configurations;, 2. Formatting checklists and test procedures; and, 3. Measuring vulnerability impact;" }, { "ControlTitle":"NIST SP 800-53 Rev. 5 RA-7", @@ -1881,7 +1881,7 @@ }, { "ControlTitle":"CSA Cloud Controls Matrix V4.0 13.5", - "ControlDescription":"Manage Access Control for Remote Assets: Manage access control for assets remotely connecting to enterprise resources. Determine amount of access to enterprise resources based on: up-to-date anti-malware software installed, configuration compliance with the enterprise's secure configuration process, and ensuring the operating system and applications are up-to-date.\t " + "ControlDescription":"Manage Access Control for Remote Assets: Manage access control for assets remotely connecting to enterprise resources. Determine amount of access to enterprise resources based on: up-to-date anti-malware software installed, configuration compliance with the enterprise's secure configuration process, and ensuring the operating system and applications are up-to-date.\t" }, { "ControlTitle":"CSA Cloud Controls Matrix V4.0 13.6", @@ -2405,7 +2405,7 @@ }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A6.2.1", - "ControlDescription":"Please list all internet browsers you use so that the assessor can understand your setup and verify that they are in support. For example: Chrome Version 89; Safari Version 14 " + "ControlDescription":"Please list all internet browsers you use so that the assessor can understand your setup and verify that they are in support. For example: Chrome Version 89; Safari Version 14" }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A6.2.2", @@ -2465,11 +2465,11 @@ }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A5.10", - "ControlDescription":"Device locking mechanisms such as biometric, password or PIN, need to be enabled to prevent unauthorised access to devices accessing organisational data or services., This a new requirement in Cyber Essentials. More information can be found in the 'Cyber Essentials requirement for Infrastructure v3.0' document. https://www.ncsc.gov.uk/files/Cyber-Essentials-Requirements-for-IT-infrastructure-3-0.pdf " + "ControlDescription":"Device locking mechanisms such as biometric, password or PIN, need to be enabled to prevent unauthorised access to devices accessing organisational data or services., This a new requirement in Cyber Essentials. More information can be found in the 'Cyber Essentials requirement for Infrastructure v3.0' document. https://www.ncsc.gov.uk/files/Cyber-Essentials-Requirements-for-IT-infrastructure-3-0.pdf" }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A4.11", - "ControlDescription":"Your software firewall needs be configured and enabled at all times, even when sitting behind a physical/virtual boundary firewall in an office location. 
You can check this setting on Macs in the Security & Privacy section of System Preferences. On Windows laptops you can check this by going to Settings and searching for 'Windows firewall'. On Linux try 'ufw status'. " + "ControlDescription":"Your software firewall needs be configured and enabled at all times, even when sitting behind a physical/virtual boundary firewall in an office location. You can check this setting on Macs in the Security & Privacy section of System Preferences. On Windows laptops you can check this by going to Settings and searching for 'Windows firewall'. On Linux try 'ufw status'. " }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A4.5", @@ -2485,7 +2485,7 @@ }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A4.1", - "ControlDescription":"You must have firewalls in place between your office network and the internet. " + "ControlDescription":"You must have firewalls in place between your office network and the internet. " }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A4.1.1", @@ -2569,7 +2569,7 @@ }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A6.5.2", - "ControlDescription":"Please indicate how updates are applied when auto updates has not been configured. " + "ControlDescription":"Please indicate how updates are applied when auto updates has not been configured. " }, { "ControlTitle":"UK NCSC Cyber Essentials V2.2 A6.4.1", @@ -4493,7 +4493,7 @@ }, { "ControlTitle":"NZISM V3.5 17.5.6. Using SSH (CID:2647)", - "ControlDescription":"The table below outlines the settings that SHOULD be implemented when using SSH., Configuration description, Configuration directive Disallow the use of SSH version 1, Protocol 2, On machines with multiple interfaces, configure the SSH daemon to listen only on the required interfaces, ListenAddress xxx.xxx.xxx.xxx, Disable connection forwarding, AllowTCPForwarding no, Disable gateway ports, Gatewayports no, Disable the ability to login directly as root, PermitRootLogin no, Disable host-based authentication, HostbasedAuthentication no, Disable rhosts-based authentication, RhostsAuthentication noIgnoreRhosts yes, Do not allow empty passwords, PermitEmptyPasswords no, Configure a suitable login banner, Banner/directory/filename, Configure a login authentication timeout of no more than 60 seconds, LoginGraceTime xx, Disable X forwarding , X11Forwarding no " + "ControlDescription":"The table below outlines the settings that SHOULD be implemented when using SSH., Configuration description, Configuration directive Disallow the use of SSH version 1, Protocol 2, On machines with multiple interfaces, configure the SSH daemon to listen only on the required interfaces, ListenAddress xxx.xxx.xxx.xxx, Disable connection forwarding, AllowTCPForwarding no, Disable gateway ports, Gatewayports no, Disable the ability to login directly as root, PermitRootLogin no, Disable host-based authentication, HostbasedAuthentication no, Disable rhosts-based authentication, RhostsAuthentication noIgnoreRhosts yes, Do not allow empty passwords, PermitEmptyPasswords no, Configure a suitable login banner, Banner/directory/filename, Configure a login authentication timeout of no more than 60 seconds, LoginGraceTime xx, Disable X forwarding , X11Forwarding no" }, { "ControlTitle":"NZISM V3.5 17.8.10. 
Mode of operation (CID:2842)", @@ -5977,23 +5977,23 @@ }, { "ControlTitle":"ECB CROE 2.1.2.1-13", - "ControlDescription":"The FMI should ensure that, as part of its formal process to review and update its cyber resilience strategy and framework (including all policies, procedures and controls), a number of factors are considered, such as: " + "ControlDescription":"The FMI should ensure that, as part of its formal process to review and update its cyber resilience strategy and framework (including all policies, procedures and controls), a number of factors are considered, such as:" }, { "ControlTitle":"ECB CROE 2.1.2.1-13.a", - "ControlDescription":"(a) the current and evolving cyber threats (e.g. those associated with the supply chain, use of cloud services, social networking, mobile applications and the internet of things, etc.); " + "ControlDescription":"(a) the current and evolving cyber threats (e.g. those associated with the supply chain, use of cloud services, social networking, mobile applications and the internet of things, etc.);" }, { "ControlTitle":"ECB CROE 2.1.2.1-13.b", - "ControlDescription":"(b) threat intelligence on threat actors and new tactics, techniques and procedures which may specifically impact the FMI; " + "ControlDescription":"(b) threat intelligence on threat actors and new tactics, techniques and procedures which may specifically impact the FMI;" }, { "ControlTitle":"ECB CROE 2.1.2.1-13.c", - "ControlDescription":"(c) the results of risk assessments of the FMI's critical functions, key roles, processes, information assets, third-party service providers and interconnections; " + "ControlDescription":"(c) the results of risk assessments of the FMI's critical functions, key roles, processes, information assets, third-party service providers and interconnections;" }, { "ControlTitle":"ECB CROE 2.1.2.1-13.d", - "ControlDescription":"(d) actual cyber incidents that have impacted the FMI directly or external cyber incidents from the ecosystem; " + "ControlDescription":"(d) actual cyber incidents that have impacted the FMI directly or external cyber incidents from the ecosystem;" }, { "ControlTitle":"ECB CROE 2.1.2.1-13.e", @@ -6193,7 +6193,7 @@ }, { "ControlTitle":"ECB CROE 2.3.2.1-1", - "ControlDescription":"The FMI should implement a comprehensive and appropriate set of security controls that will allow it to achieve the security objectives needed to meet its business requirements. The FMI should implement these controls based on the identification of its critical functions, key roles, processes, information assets, third-party service providers and interconnections, as per the risk assessment in the identification phase. The security objectives may include ensuring: " + "ControlDescription":"The FMI should implement a comprehensive and appropriate set of security controls that will allow it to achieve the security objectives needed to meet its business requirements. The FMI should implement these controls based on the identification of its critical functions, key roles, processes, information assets, third-party service providers and interconnections, as per the risk assessment in the identification phase. 
The security objectives may include ensuring:" }, { "ControlTitle":"ECB CROE 2.3.2.1-1.a", @@ -6201,11 +6201,11 @@ }, { "ControlTitle":"ECB CROE 2.3.2.1-1.b", - "ControlDescription":"(b) the integrity of the information stored in its information systems, while both in use and transit; " + "ControlDescription":"(b) the integrity of the information stored in its information systems, while both in use and transit;" }, { "ControlTitle":"ECB CROE 2.3.2.1-1.c", - "ControlDescription":"(c) the protection, integrity, confidentiality and availability of data while at rest, in use and in transit; " + "ControlDescription":"(c) the protection, integrity, confidentiality and availability of data while at rest, in use and in transit;" }, { "ControlTitle":"ECB CROE 2.3.2.1-1.d", @@ -9273,7 +9273,7 @@ }, { "ControlTitle":"AICPA TSC CC1.1", - "ControlDescription":"COSO Principle 1: The entity demonstrates a commitment to integrity and ethical values. Sets the Tone at the Top—The board of directors and management, at all levels, demonstrate through their directives, actions, and behavior the importance of integrity and ethical values to support the functioning of the system of internal control. Establishes Standards of Conduct—The expectations of the board of directors and senior management concerning integrity and ethical values are defined in the entity's standards of conduct and understood at all levels of the entity and by outsourced service providers and business partners. Evaluates Adherence to Standards of Conduct—Processes are in place to evaluate the performance of individuals and teams against the entity's expected standards of conduct. Addresses Deviations in a Timely Manner—Deviations from the entity's expected standards of conduct are identified and remedied in a timely and consistent manner. Considers Contractors and Vendor Employees in Demonstrating Its Commitment—Management and the board of directors consider the use of contractors and vendor employees in its processes for establishing standards of conduct, evaluating adherence to those standards, and addressing deviations in a timely manner. " + "ControlDescription":"COSO Principle 1: The entity demonstrates a commitment to integrity and ethical values. Sets the Tone at the Top—The board of directors and management, at all levels, demonstrate through their directives, actions, and behavior the importance of integrity and ethical values to support the functioning of the system of internal control. Establishes Standards of Conduct—The expectations of the board of directors and senior management concerning integrity and ethical values are defined in the entity's standards of conduct and understood at all levels of the entity and by outsourced service providers and business partners. Evaluates Adherence to Standards of Conduct—Processes are in place to evaluate the performance of individuals and teams against the entity's expected standards of conduct. Addresses Deviations in a Timely Manner—Deviations from the entity's expected standards of conduct are identified and remedied in a timely and consistent manner. Considers Contractors and Vendor Employees in Demonstrating Its Commitment—Management and the board of directors consider the use of contractors and vendor employees in its processes for establishing standards of conduct, evaluating adherence to those standards, and addressing deviations in a timely manner." 
}, { "ControlTitle":"AICPA TSC CC1.2", @@ -9313,7 +9313,7 @@ }, { "ControlTitle":"AICPA TSC CC3.3", - "ControlDescription":"COSO Principle 8: The entity considers the potential for fraud in assessing risks to the achievement of objectives. Considers Various Types of Fraud—The assessment of fraud considers fraudulent reporting, possible loss of assets, and corruption resulting from the various ways that fraud and misconduct can occur. Assesses Incentives and Pressures—The assessment of fraud risks considers incentives and pressures. Assesses Opportunities—The assessment of fraud risk considers opportunities for unauthorized acquisition, use, or disposal of assets, altering the entity's reporting records, or committing other inappropriate acts. Assesses Attitudes and Rationalizations—The assessment of fraud risk considers how management and other personnel might engage in or justify inappropriate actions. Considers the Risks Related to the Use of IT and Access to Information—The assessment of fraud risks includes consideration of threats and vulnerabilities that arise specifically from the use of IT and access to information. " + "ControlDescription":"COSO Principle 8: The entity considers the potential for fraud in assessing risks to the achievement of objectives. Considers Various Types of Fraud—The assessment of fraud considers fraudulent reporting, possible loss of assets, and corruption resulting from the various ways that fraud and misconduct can occur. Assesses Incentives and Pressures—The assessment of fraud risks considers incentives and pressures. Assesses Opportunities—The assessment of fraud risk considers opportunities for unauthorized acquisition, use, or disposal of assets, altering the entity's reporting records, or committing other inappropriate acts. Assesses Attitudes and Rationalizations—The assessment of fraud risk considers how management and other personnel might engage in or justify inappropriate actions. Considers the Risks Related to the Use of IT and Access to Information—The assessment of fraud risks includes consideration of threats and vulnerabilities that arise specifically from the use of IT and access to information." }, { "ControlTitle":"AICPA TSC CC3.4", @@ -9393,7 +9393,7 @@ }, { "ControlTitle":"AICPA TSC CC8.1", - "ControlDescription":"The entity authorizes, designs, develops or acquires, configures, documents, tests, approves, and implements changes to infrastructure, data, software, and procedures to meet its objectives. Manages Changes Throughout the System Lifecycle—A process for managing system changes throughout the lifecycle of the system and its components (infrastructure, data, software and procedures) is used to support system availability and processing integrity. Authorizes Changes—A process is in place to authorize system changes prior to development. Designs and Develops Changes—A process is in place to design and develop system changes. Documents Changes—A process is in place to document system changes to support ongoing maintenance of the system and to support system users in performing their responsibilities. Tracks System Changes—A process is in place to track system changes prior to implementation. Configures Software—A process is in place to select and implement the configuration parameters used to control the functionality of software. Tests System Changes—A process is in place to test system changes prior to implementation. Approves System Changes—A process is in place to approve system changes prior to implementation. 
Deploys System Changes—A process is in place to implement system changes. Identifies and Evaluates System Changes—Objectives affected by system changes are identified, and the ability of the modified system to meet the objectives is evaluated throughout the system development life cycle. Identifies Changes in Infrastructure, Data, Software, and Procedures Required to Remediate Incidents—Changes in infrastructure, data, software, and procedures required to remediate incidents to continue to meet objectives are identified, and the change process is initiated upon identification. Creates Baseline Configuration of IT Technology—A baseline configuration of IT and control systems is created and maintained. Provides for Changes Necessary in Emergency Situations —A process is in place for authorizing, designing, testing, approving and implementing changes necessary in emergency situations (that is, changes that need to be implemented in an urgent timeframe). Protects Confidential Information—The entity protects confidential information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to confidentiality. Protects Personal Information—The entity protects personal information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to privacy. " + "ControlDescription":"The entity authorizes, designs, develops or acquires, configures, documents, tests, approves, and implements changes to infrastructure, data, software, and procedures to meet its objectives. Manages Changes Throughout the System Lifecycle—A process for managing system changes throughout the lifecycle of the system and its components (infrastructure, data, software and procedures) is used to support system availability and processing integrity. Authorizes Changes—A process is in place to authorize system changes prior to development. Designs and Develops Changes—A process is in place to design and develop system changes. Documents Changes—A process is in place to document system changes to support ongoing maintenance of the system and to support system users in performing their responsibilities. Tracks System Changes—A process is in place to track system changes prior to implementation. Configures Software—A process is in place to select and implement the configuration parameters used to control the functionality of software. Tests System Changes—A process is in place to test system changes prior to implementation. Approves System Changes—A process is in place to approve system changes prior to implementation. Deploys System Changes—A process is in place to implement system changes. Identifies and Evaluates System Changes—Objectives affected by system changes are identified, and the ability of the modified system to meet the objectives is evaluated throughout the system development life cycle. Identifies Changes in Infrastructure, Data, Software, and Procedures Required to Remediate Incidents—Changes in infrastructure, data, software, and procedures required to remediate incidents to continue to meet objectives are identified, and the change process is initiated upon identification. Creates Baseline Configuration of IT Technology—A baseline configuration of IT and control systems is created and maintained. 
Provides for Changes Necessary in Emergency Situations —A process is in place for authorizing, designing, testing, approving and implementing changes necessary in emergency situations (that is, changes that need to be implemented in an urgent timeframe). Protects Confidential Information—The entity protects confidential information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to confidentiality. Protects Personal Information—The entity protects personal information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to privacy." }, { "ControlTitle":"AICPA TSC CC9.1", @@ -9433,7 +9433,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 AM-CS-3", - "ControlDescription":"Assets are securely disposed of, if no longer required. " + "ControlDescription":"Assets are securely disposed of, if no longer required. " }, { "ControlTitle":"Equifax SCF V1.0 AM-CS-4", @@ -9465,7 +9465,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 RP-CS-2", - "ControlDescription":"The Company executes contingency plans, including business continuity and disaster recovery, during or after an event as deemed necessary for plan activation. " + "ControlDescription":"The Company executes contingency plans, including business continuity and disaster recovery, during or after an event as deemed necessary for plan activation." }, { "ControlTitle":"Equifax SCF V1.0 RP-CS-3", @@ -9509,11 +9509,11 @@ }, { "ControlTitle":"Equifax SCF V1.0 TC-CS-1", - "ControlDescription":"The Company maintains a Change Management Policy that identifies requirements for changes, roles, responsibilities, and management commitment. " + "ControlDescription":"The Company maintains a Change Management Policy that identifies requirements for changes, roles, responsibilities, and management commitment." }, { "ControlTitle":"Equifax SCF V1.0 TC-CS-2", - "ControlDescription":"The Company reviews proposed changes to systems and approves or disapproves changes after consideration for change type, business need, impact, and risk. " + "ControlDescription":"The Company reviews proposed changes to systems and approves or disapproves changes after consideration for change type, business need, impact, and risk." }, { "ControlTitle":"Equifax SCF V1.0 TC-CS-3", @@ -9529,7 +9529,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 SC-CS-1", - "ControlDescription":"The Company reviews, responds, and incorporates legislative, statutory, and regulatory requirements regarding cybersecurity if determined to be applicable to The Company. " + "ControlDescription":"The Company reviews, responds, and incorporates legislative, statutory, and regulatory requirements regarding cybersecurity if determined to be applicable to The Company." }, { "ControlTitle":"Equifax SCF V1.0 SC-CS-2", @@ -9625,7 +9625,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 GV-CS-14", - "ControlDescription":"For The Company systems deployed in its FedRAMP boundaries, a system security plan is established, maintained, and updated on a [INSERT TIME FRAME] basis. " + "ControlDescription":"For The Company systems deployed in its FedRAMP boundaries, a system security plan is established, maintained, and updated on a [INSERT TIME FRAME] basis." 
}, { "ControlTitle":"Equifax SCF V1.0 GV-CS-15", @@ -9681,7 +9681,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 HR-CS-5", - "ControlDescription":"For The Company systems deployed, The Company maintains a Sanctions Process that is a two-tier administrative process and is designed to evaluate allegations. " + "ControlDescription":"For The Company systems deployed, The Company maintains a Sanctions Process that is a two-tier administrative process and is designed to evaluate allegations." }, { "ControlTitle":"Equifax SCF V1.0 AC-CS-1", @@ -9857,7 +9857,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-3", - "ControlDescription":"The Company restricts unnecessary network services, ports, and protocols, and monitors communications at external network boundaries and between internal networks. " + "ControlDescription":"The Company restricts unnecessary network services, ports, and protocols, and monitors communications at external network boundaries and between internal networks." }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-4", @@ -9869,7 +9869,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-6", - "ControlDescription":"The Company regularly reviews and revises its segmentation strategy, policies, and methods on a [INSERT TIME FRAME] basis. " + "ControlDescription":"The Company regularly reviews and revises its segmentation strategy, policies, and methods on a [INSERT TIME FRAME] basis." }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-7", @@ -9877,7 +9877,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-8", - "ControlDescription":"The Company separates its network by function with the user network segmented from production, non-production and internet-facing networks. Network design and implementation considers the principles of least privilege. " + "ControlDescription":"The Company separates its network by function with the user network segmented from production, non-production and internet-facing networks. Network design and implementation considers the principles of least privilege." }, { "ControlTitle":"Equifax SCF V1.0 NI-CS-9", @@ -10009,7 +10009,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 RM-CS-11", - "ControlDescription":"For The Company systems deployed in its FedRAMP boundaries, The Company performs a FIPS 199 analysis and categorization on an annual basis. " + "ControlDescription":"For The Company systems deployed in its FedRAMP boundaries, The Company performs a FIPS 199 analysis and categorization on an annual basis." }, { "ControlTitle":"Equifax SCF V1.0 RM-CS-2", @@ -10017,7 +10017,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 RM-CS-3", - "ControlDescription":"The Company performs an Enterprise Security Risk Assessment to assess internal and external risks to the security, confidentiality, or integrity of personal information that could result in the unauthorized disclosure, misuse, loss, alteration, destruction, or other compromise of such information and documents those risks that are material. " + "ControlDescription":"The Company performs an Enterprise Security Risk Assessment to assess internal and external risks to the security, confidentiality, or integrity of personal information that could result in the unauthorized disclosure, misuse, loss, alteration, destruction, or other compromise of such information and documents those risks that are material." 
}, { "ControlTitle":"Equifax SCF V1.0 RM-CS-4", @@ -10081,7 +10081,7 @@ }, { "ControlTitle":"Equifax SCF V1.0 TD-CS-2", - "ControlDescription":"The Threat Intelligence team sources and provides actionable threat information, such as indicators of compromise, to relevant parties to enable monitoring for cyber threats. " + "ControlDescription":"The Threat Intelligence team sources and provides actionable threat information, such as indicators of compromise, to relevant parties to enable monitoring for cyber threats." }, { "ControlTitle":"Equifax SCF V1.0 TD-CS-3", @@ -10917,7 +10917,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1495", - "ControlDescription":"Firmware Corruption Mitigation : Prevent adversary access to privileged accounts or access necessary to perform this technique. Check the integrity of the existing BIOS and device firmware to determine if it is vulnerable to modification. Patch the BIOS and other firmware as necessary to prevent successful use of known vulnerabilities. " + "ControlDescription":"Firmware Corruption Mitigation : Prevent adversary access to privileged accounts or access necessary to perform this technique. Check the integrity of the existing BIOS and device firmware to determine if it is vulnerable to modification. Patch the BIOS and other firmware as necessary to prevent successful use of known vulnerabilities." }, { "ControlTitle":"MITRE ATT&CK T1188", @@ -10985,7 +10985,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1501", - "ControlDescription":"Systemd Service Mitigation : The creation and modification of systemd service unit files is generally reserved for administrators such as the Linux root user and other users with superuser privileges. Limit user access to system utilities such as systemctl to only users who have a legitimate need. Restrict read/write access to systemd unit files to only select privileged users who have a legitimate need to manage system services. Additionally, the installation of software commonly adds and changes systemd service unit files. Restrict software installation to trusted repositories only and be cautious of orphaned software packages. Utilize malicious code protection and application whitelisting to mitigate the ability of malware to create or modify systemd services. " + "ControlDescription":"Systemd Service Mitigation : The creation and modification of systemd service unit files is generally reserved for administrators such as the Linux root user and other users with superuser privileges. Limit user access to system utilities such as systemctl to only users who have a legitimate need. Restrict read/write access to systemd unit files to only select privileged users who have a legitimate need to manage system services. Additionally, the installation of software commonly adds and changes systemd service unit files. Restrict software installation to trusted repositories only and be cautious of orphaned software packages. Utilize malicious code protection and application whitelisting to mitigate the ability of malware to create or modify systemd services." }, { "ControlTitle":"MITRE ATT&CK T1158", @@ -11281,7 +11281,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1078", - "ControlDescription":"Valid Accounts Mitigation : Take measures to detect or prevent techniques such as [OS Credential Dumping](https://attack.mitre.org/techniques/T1003) or installation of keyloggers to acquire credentials through [Input Capture](https://attack.mitre.org/techniques/T1056). 
Limit credential overlap across systems to prevent access if account credentials are obtained. Ensure that local administrator accounts have complex, unique passwords across all systems on the network. Do not put user or admin domain accounts in the local administrator groups across systems unless they are tightly controlled and use of accounts is segmented, as this is often equivalent to having a local administrator account with the same password on all systems. Follow best practices for design and administration of an enterprise network to limit privileged account use across administrative tiers. (Citation: Microsoft Securing Privileged Access) Audit domain and local accounts as well as their permission levels routinely to look for situations that could allow an adversary to gain wide access by obtaining credentials of a privileged account. (Citation: TechNet Credential Theft) (Citation: TechNet Least Privilege) These audits should also include if default accounts have been enabled, or if new local accounts are created that have not be authorized. Applications and appliances that utilize default username and password should be changed immediately after the installation, and before deployment to a production environment. (Citation: US-CERT Alert TA13-175A Risks of Default Passwords on the Internet) When possible, applications that use SSH keys should be updated periodically and properly secured. " + "ControlDescription":"Valid Accounts Mitigation : Take measures to detect or prevent techniques such as [OS Credential Dumping](https://attack.mitre.org/techniques/T1003) or installation of keyloggers to acquire credentials through [Input Capture](https://attack.mitre.org/techniques/T1056). Limit credential overlap across systems to prevent access if account credentials are obtained. Ensure that local administrator accounts have complex, unique passwords across all systems on the network. Do not put user or admin domain accounts in the local administrator groups across systems unless they are tightly controlled and use of accounts is segmented, as this is often equivalent to having a local administrator account with the same password on all systems. Follow best practices for design and administration of an enterprise network to limit privileged account use across administrative tiers. (Citation: Microsoft Securing Privileged Access) Audit domain and local accounts as well as their permission levels routinely to look for situations that could allow an adversary to gain wide access by obtaining credentials of a privileged account. (Citation: TechNet Credential Theft) (Citation: TechNet Least Privilege) These audits should also include if default accounts have been enabled, or if new local accounts are created that have not be authorized. Applications and appliances that utilize default username and password should be changed immediately after the installation, and before deployment to a production environment. (Citation: US-CERT Alert TA13-175A Risks of Default Passwords on the Internet) When possible, applications that use SSH keys should be updated periodically and properly secured." }, { "ControlTitle":"MITRE ATT&CK T1133", @@ -11417,7 +11417,7 @@ }, { "ControlTitle":"MITRE ATT&CK TA0002", - "ControlDescription":"Execution : The adversary is trying to run malicious code.Execution consists of techniques that result in adversary-controlled code running on a local or remote system. 
Techniques that run malicious code are often paired with techniques from all other tactics to achieve broader goals, like exploring a network or stealing data. For example, an adversary might use a remote access tool to run a PowerShell script that does Remote System Discovery. " + "ControlDescription":"Execution : The adversary is trying to run malicious code.Execution consists of techniques that result in adversary-controlled code running on a local or remote system. Techniques that run malicious code are often paired with techniques from all other tactics to achieve broader goals, like exploring a network or stealing data. For example, an adversary might use a remote access tool to run a PowerShell script that does Remote System Discovery." }, { "ControlTitle":"MITRE ATT&CK TA0040", @@ -11425,19 +11425,19 @@ }, { "ControlTitle":"MITRE ATT&CK TA0003", - "ControlDescription":"Persistence : The adversary is trying to maintain their foothold.Persistence consists of techniques that adversaries use to keep access to systems across restarts, changed credentials, and other interruptions that could cut off their access. Techniques used for persistence include any access, action, or configuration changes that let them maintain their foothold on systems, such as replacing or hijacking legitimate code or adding startup code. " + "ControlDescription":"Persistence : The adversary is trying to maintain their foothold.Persistence consists of techniques that adversaries use to keep access to systems across restarts, changed credentials, and other interruptions that could cut off their access. Techniques used for persistence include any access, action, or configuration changes that let them maintain their foothold on systems, such as replacing or hijacking legitimate code or adding startup code." }, { "ControlTitle":"MITRE ATT&CK TA0004", - "ControlDescription":"Privilege Escalation : The adversary is trying to gain higher-level permissions.Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations, and vulnerabilities. Examples of elevated access include: * SYSTEM/root level\n* local administrator\n* user account with admin-like access \n* user accounts with access to specific system or perform specific functionThese techniques often overlap with Persistence techniques, as OS features that let an adversary persist can execute in an elevated context. " + "ControlDescription":"Privilege Escalation : The adversary is trying to gain higher-level permissions.Privilege Escalation consists of techniques that adversaries use to gain higher-level permissions on a system or network. Adversaries can often enter and explore a network with unprivileged access but require elevated permissions to follow through on their objectives. Common approaches are to take advantage of system weaknesses, misconfigurations, and vulnerabilities. Examples of elevated access include: * SYSTEM/root level\n* local administrator\n* user account with admin-like access \n* user accounts with access to specific system or perform specific functionThese techniques often overlap with Persistence techniques, as OS features that let an adversary persist can execute in an elevated context. 
" }, { "ControlTitle":"MITRE ATT&CK TA0008", - "ControlDescription":"Lateral Movement : The adversary is trying to move through your environment.Lateral Movement consists of techniques that adversaries use to enter and control remote systems on a network. Following through on their primary objective often requires exploring the network to find their target and subsequently gaining access to it. Reaching their objective often involves pivoting through multiple systems and accounts to gain. Adversaries might install their own remote access tools to accomplish Lateral Movement or use legitimate credentials with native network and operating system tools, which may be stealthier. " + "ControlDescription":"Lateral Movement : The adversary is trying to move through your environment.Lateral Movement consists of techniques that adversaries use to enter and control remote systems on a network. Following through on their primary objective often requires exploring the network to find their target and subsequently gaining access to it. Reaching their objective often involves pivoting through multiple systems and accounts to gain. Adversaries might install their own remote access tools to accomplish Lateral Movement or use legitimate credentials with native network and operating system tools, which may be stealthier." }, { "ControlTitle":"MITRE ATT&CK TA0005", - "ControlDescription":"Defense Evasion : The adversary is trying to avoid being detected.Defense Evasion consists of techniques that adversaries use to avoid detection throughout their compromise. Techniques used for defense evasion include uninstalling/disabling security software or obfuscating/encrypting data and scripts. Adversaries also leverage and abuse trusted processes to hide and masquerade their malware. Other tactics' techniques are cross-listed here when those techniques include the added benefit of subverting defenses. " + "ControlDescription":"Defense Evasion : The adversary is trying to avoid being detected.Defense Evasion consists of techniques that adversaries use to avoid detection throughout their compromise. Techniques used for defense evasion include uninstalling/disabling security software or obfuscating/encrypting data and scripts. Adversaries also leverage and abuse trusted processes to hide and masquerade their malware. Other tactics' techniques are cross-listed here when those techniques include the added benefit of subverting defenses." }, { "ControlTitle":"MITRE ATT&CK TA0010", @@ -11445,7 +11445,7 @@ }, { "ControlTitle":"MITRE ATT&CK TA0007", - "ControlDescription":"Discovery : The adversary is trying to figure out your environment.Discovery consists of techniques an adversary may use to gain knowledge about the system and internal network. These techniques help adversaries observe the environment and orient themselves before deciding how to act. They also allow adversaries to explore what they can control and what's around their entry point in order to discover how it could benefit their current objective. Native operating system tools are often used toward this post-compromise information-gathering objective. " + "ControlDescription":"Discovery : The adversary is trying to figure out your environment.Discovery consists of techniques an adversary may use to gain knowledge about the system and internal network. These techniques help adversaries observe the environment and orient themselves before deciding how to act. 
They also allow adversaries to explore what they can control and what's around their entry point in order to discover how it could benefit their current objective. Native operating system tools are often used toward this post-compromise information-gathering objective." }, { "ControlTitle":"MITRE ATT&CK TA0009", @@ -11469,11 +11469,11 @@ }, { "ControlTitle":"MITRE ATT&CK T1055.011", - "ControlDescription":"Extra Window Memory Injection : Adversaries may inject malicious code into process via Extra Window Memory (EWM) in order to evade process-based defenses as well as possibly elevate privileges. EWM injection is a method of executing arbitrary code in the address space of a separate live process. Before creating a window, graphical Windows-based processes must prescribe to or register a windows class, which stipulate appearance and behavior (via windows procedures, which are functions that handle input/output of data).(Citation: Microsoft Window Classes) Registration of new windows classes can include a request for up to 40 bytes of EWM to be appended to the allocated memory of each instance of that class. This EWM is intended to store data specific to that window and has specific application programming interface (API) functions to set and get its value. (Citation: Microsoft GetWindowLong function) (Citation: Microsoft SetWindowLong function)Although small, the EWM is large enough to store a 32-bit pointer and is often used to point to a windows procedure. Malware may possibly utilize this memory location in part of an attack chain that includes writing code to shared sections of the process's memory, placing a pointer to the code in EWM, then invoking execution by returning execution control to the address in the process's EWM.Execution granted through EWM injection may allow access to both the target process's memory and possibly elevated privileges. Writing payloads to shared sections also avoids the use of highly monitored API calls such as WriteProcessMemory and CreateRemoteThread.(Citation: Elastic Process Injection July 2017) More sophisticated malware samples may also potentially bypass protection mechanisms such as data execution prevention (DEP) by triggering a combination of windows procedures and other system functions that will rewrite the malicious payload inside an executable portion of the target process. (Citation: MalwareTech Power Loader Aug 2013) (Citation: WeLiveSecurity Gapz and Redyms Mar 2013)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via EWM injection may also evade detection from security products since the execution is masked under a legitimate process. " + "ControlDescription":"Extra Window Memory Injection : Adversaries may inject malicious code into process via Extra Window Memory (EWM) in order to evade process-based defenses as well as possibly elevate privileges. EWM injection is a method of executing arbitrary code in the address space of a separate live process. Before creating a window, graphical Windows-based processes must prescribe to or register a windows class, which stipulate appearance and behavior (via windows procedures, which are functions that handle input/output of data).(Citation: Microsoft Window Classes) Registration of new windows classes can include a request for up to 40 bytes of EWM to be appended to the allocated memory of each instance of that class. 
This EWM is intended to store data specific to that window and has specific application programming interface (API) functions to set and get its value. (Citation: Microsoft GetWindowLong function) (Citation: Microsoft SetWindowLong function)Although small, the EWM is large enough to store a 32-bit pointer and is often used to point to a windows procedure. Malware may possibly utilize this memory location in part of an attack chain that includes writing code to shared sections of the process's memory, placing a pointer to the code in EWM, then invoking execution by returning execution control to the address in the process's EWM.Execution granted through EWM injection may allow access to both the target process's memory and possibly elevated privileges. Writing payloads to shared sections also avoids the use of highly monitored API calls such as WriteProcessMemory and CreateRemoteThread.(Citation: Elastic Process Injection July 2017) More sophisticated malware samples may also potentially bypass protection mechanisms such as data execution prevention (DEP) by triggering a combination of windows procedures and other system functions that will rewrite the malicious payload inside an executable portion of the target process. (Citation: MalwareTech Power Loader Aug 2013) (Citation: WeLiveSecurity Gapz and Redyms Mar 2013)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via EWM injection may also evade detection from security products since the execution is masked under a legitimate process." }, { "ControlTitle":"MITRE ATT&CK T1053.005", - "ControlDescription":"Scheduled Task : Adversaries may abuse the Windows Task Scheduler to perform task scheduling for initial or recurring execution of malicious code. There are multiple ways to access the Task Scheduler in Windows. The [schtasks](https://attack.mitre.org/software/S0111) utility can be run directly on the command line, or the Task Scheduler can be opened through the GUI within the Administrator Tools section of the Control Panel. In some cases, adversaries have used a .NET wrapper for the Windows Task Scheduler, and alternatively, adversaries have used the Windows netapi32 library to create a scheduled task.The deprecated [at](https://attack.mitre.org/software/S0110) utility could also be abused by adversaries (ex: [At](https://attack.mitre.org/techniques/T1053/002)), though at.exe can not access tasks created with schtasks or the Control Panel.An adversary may use Windows Task Scheduler to execute programs at system startup or on a scheduled basis for persistence. The Windows Task Scheduler can also be abused to conduct remote Execution as part of Lateral Movement and/or to run a process under the context of a specified account (such as SYSTEM). Similar to [System Binary Proxy Execution](https://attack.mitre.org/techniques/T1218), adversaries have also abused the Windows Task Scheduler to potentially mask one-time execution under signed/trusted system processes.(Citation: ProofPoint Serpent)Adversaries may also create 'hidden' scheduled tasks (i.e. [Hide Artifacts](https://attack.mitre.org/techniques/T1564)) that may not be visible to defender tools and manual queries used to enumerate tasks. 
Specifically, an adversary may hide a task from `schtasks /query` and the Task Scheduler by deleting the associated Security Descriptor (SD) registry value (where deletion of this value must be completed using SYSTEM permissions).(Citation: SigmaHQ)(Citation: Tarrask scheduled task) Adversaries may also employ alternate methods to hide tasks, such as altering the metadata (e.g., `Index` value) within associated registry keys.(Citation: Defending Against Scheduled Task Attacks in Windows Environments) " + "ControlDescription":"Scheduled Task : Adversaries may abuse the Windows Task Scheduler to perform task scheduling for initial or recurring execution of malicious code. There are multiple ways to access the Task Scheduler in Windows. The [schtasks](https://attack.mitre.org/software/S0111) utility can be run directly on the command line, or the Task Scheduler can be opened through the GUI within the Administrator Tools section of the Control Panel. In some cases, adversaries have used a .NET wrapper for the Windows Task Scheduler, and alternatively, adversaries have used the Windows netapi32 library to create a scheduled task.The deprecated [at](https://attack.mitre.org/software/S0110) utility could also be abused by adversaries (ex: [At](https://attack.mitre.org/techniques/T1053/002)), though at.exe can not access tasks created with schtasks or the Control Panel.An adversary may use Windows Task Scheduler to execute programs at system startup or on a scheduled basis for persistence. The Windows Task Scheduler can also be abused to conduct remote Execution as part of Lateral Movement and/or to run a process under the context of a specified account (such as SYSTEM). Similar to [System Binary Proxy Execution](https://attack.mitre.org/techniques/T1218), adversaries have also abused the Windows Task Scheduler to potentially mask one-time execution under signed/trusted system processes.(Citation: ProofPoint Serpent)Adversaries may also create 'hidden' scheduled tasks (i.e. [Hide Artifacts](https://attack.mitre.org/techniques/T1564)) that may not be visible to defender tools and manual queries used to enumerate tasks. Specifically, an adversary may hide a task from `schtasks /query` and the Task Scheduler by deleting the associated Security Descriptor (SD) registry value (where deletion of this value must be completed using SYSTEM permissions).(Citation: SigmaHQ)(Citation: Tarrask scheduled task) Adversaries may also employ alternate methods to hide tasks, such as altering the metadata (e.g., `Index` value) within associated registry keys.(Citation: Defending Against Scheduled Task Attacks in Windows Environments)" }, { "ControlTitle":"MITRE ATT&CK T1205.002", @@ -11505,7 +11505,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1027.011", - "ControlDescription":"Fileless Storage : Adversaries may store data in 'fileless' formats to conceal malicious activity from defenses. Fileless storage can be broadly defined as any format other than a file. 
Common examples of non-volatile fileless storage include the Windows Registry, event logs, or WMI repository.(Citation: Microsoft Fileless)(Citation: SecureList Fileless)Similar to fileless in-memory behaviors such as [Reflective Code Loading](https://attack.mitre.org/techniques/T1620) and [Process Injection](https://attack.mitre.org/techniques/T1055), fileless data storage may remain undetected by anti-virus and other endpoint security tools that can only access specific file formats from disk storage.Adversaries may use fileless storage to conceal various types of stored data, including payloads/shellcode (potentially being used as part of [Persistence](https://attack.mitre.org/tactics/TA0003)) and collected data not yet exfiltrated from the victim (e.g., [Local Data Staging](https://attack.mitre.org/techniques/T1074/001)). Adversaries also often encrypt, encode, splice, or otherwise obfuscate this fileless data when stored.Some forms of fileless storage activity may indirectly create artifacts in the file system, but in central and otherwise difficult to inspect formats such as the WMI (e.g., `%SystemRoot%/System32/Wbem/Repository`) or Registry (e.g., `%SystemRoot%/System32/Config`) physical files.(Citation: Microsoft Fileless) " + "ControlDescription":"Fileless Storage : Adversaries may store data in 'fileless' formats to conceal malicious activity from defenses. Fileless storage can be broadly defined as any format other than a file. Common examples of non-volatile fileless storage include the Windows Registry, event logs, or WMI repository.(Citation: Microsoft Fileless)(Citation: SecureList Fileless)Similar to fileless in-memory behaviors such as [Reflective Code Loading](https://attack.mitre.org/techniques/T1620) and [Process Injection](https://attack.mitre.org/techniques/T1055), fileless data storage may remain undetected by anti-virus and other endpoint security tools that can only access specific file formats from disk storage.Adversaries may use fileless storage to conceal various types of stored data, including payloads/shellcode (potentially being used as part of [Persistence](https://attack.mitre.org/tactics/TA0003)) and collected data not yet exfiltrated from the victim (e.g., [Local Data Staging](https://attack.mitre.org/techniques/T1074/001)). Adversaries also often encrypt, encode, splice, or otherwise obfuscate this fileless data when stored.Some forms of fileless storage activity may indirectly create artifacts in the file system, but in central and otherwise difficult to inspect formats such as the WMI (e.g., `%SystemRoot%/System32/Wbem/Repository`) or Registry (e.g., `%SystemRoot%/System32/Config`) physical files.(Citation: Microsoft Fileless)" }, { "ControlTitle":"MITRE ATT&CK T1067", @@ -11529,11 +11529,11 @@ }, { "ControlTitle":"MITRE ATT&CK T1218.011", - "ControlDescription":"Rundll32 : Adversaries may abuse rundll32.exe to proxy execution of malicious code. Using rundll32.exe, vice executing directly (i.e. [Shared Modules](https://attack.mitre.org/techniques/T1129)), may avoid triggering security tools that may not monitor execution of the rundll32.exe process because of allowlists or false positives from normal operations. Rundll32.exe is commonly associated with executing DLL payloads (ex: rundll32.exe {DLLname, DLLfunction}).Rundll32.exe can also be used to execute [Control Panel](https://attack.mitre.org/techniques/T1218/002) Item files (.cpl) through the undocumented shell32.dll functions Control_RunDLL and Control_RunDLLAsUser. 
Double-clicking a .cpl file also causes rundll32.exe to execute. (Citation: Trend Micro CPL)Rundll32 can also be used to execute scripts such as JavaScript. This can be done using a syntax similar to this: rundll32.exe javascript:'/../mshtml,RunHTMLApplication ';document.write();GetObject('script:https[:]//www[.]example[.]com/malicious.sct')' This behavior has been seen used by malware such as Poweliks. (Citation: This is Security Command Line Confusion)Adversaries may also attempt to obscure malicious code from analysis by abusing the manner in which rundll32.exe loads DLL function names. As part of Windows compatibility support for various character sets, rundll32.exe will first check for wide/Unicode then ANSI character-supported functions before loading the specified function (e.g., given the command rundll32.exe ExampleDLL.dll, ExampleFunction, rundll32.exe would first attempt to execute ExampleFunctionW, or failing that ExampleFunctionA, before loading ExampleFunction). Adversaries may therefore obscure malicious code by creating multiple identical exported function names and appending W and/or A to harmless ones.(Citation: Attackify Rundll32.exe Obscurity)(Citation: Github NoRunDll) DLL functions can also be exported and executed by an ordinal number (ex: rundll32.exe file.dll,#1).Additionally, adversaries may use [Masquerading](https://attack.mitre.org/techniques/T1036) techniques (such as changing DLL file names, file extensions, or function names) to further conceal execution of a malicious payload.(Citation: rundll32.exe defense evasion) " + "ControlDescription":"Rundll32 : Adversaries may abuse rundll32.exe to proxy execution of malicious code. Using rundll32.exe, vice executing directly (i.e. [Shared Modules](https://attack.mitre.org/techniques/T1129)), may avoid triggering security tools that may not monitor execution of the rundll32.exe process because of allowlists or false positives from normal operations. Rundll32.exe is commonly associated with executing DLL payloads (ex: rundll32.exe {DLLname, DLLfunction}).Rundll32.exe can also be used to execute [Control Panel](https://attack.mitre.org/techniques/T1218/002) Item files (.cpl) through the undocumented shell32.dll functions Control_RunDLL and Control_RunDLLAsUser. Double-clicking a .cpl file also causes rundll32.exe to execute. (Citation: Trend Micro CPL)Rundll32 can also be used to execute scripts such as JavaScript. This can be done using a syntax similar to this: rundll32.exe javascript:'/../mshtml,RunHTMLApplication ';document.write();GetObject('script:https[:]//www[.]example[.]com/malicious.sct')' This behavior has been seen used by malware such as Poweliks. (Citation: This is Security Command Line Confusion)Adversaries may also attempt to obscure malicious code from analysis by abusing the manner in which rundll32.exe loads DLL function names. As part of Windows compatibility support for various character sets, rundll32.exe will first check for wide/Unicode then ANSI character-supported functions before loading the specified function (e.g., given the command rundll32.exe ExampleDLL.dll, ExampleFunction, rundll32.exe would first attempt to execute ExampleFunctionW, or failing that ExampleFunctionA, before loading ExampleFunction). 
Adversaries may therefore obscure malicious code by creating multiple identical exported function names and appending W and/or A to harmless ones.(Citation: Attackify Rundll32.exe Obscurity)(Citation: Github NoRunDll) DLL functions can also be exported and executed by an ordinal number (ex: rundll32.exe file.dll,#1).Additionally, adversaries may use [Masquerading](https://attack.mitre.org/techniques/T1036) techniques (such as changing DLL file names, file extensions, or function names) to further conceal execution of a malicious payload.(Citation: rundll32.exe defense evasion)" }, { "ControlTitle":"MITRE ATT&CK T1613", - "ControlDescription":"Container and Resource Discovery : Adversaries may attempt to discover containers and other resources that are available within a containers environment. Other resources may include images, deployments, pods, nodes, and other information such as the status of a cluster.These resources can be viewed within web applications such as the Kubernetes dashboard or can be queried via the Docker and Kubernetes APIs.(Citation: Docker API)(Citation: Kubernetes API) In Docker, logs may leak information about the environment, such as the environment's configuration, which services are available, and what cloud provider the victim may be utilizing. The discovery of these resources may inform an adversary's next steps in the environment, such as how to perform lateral movement and which methods to utilize for execution. " + "ControlDescription":"Container and Resource Discovery : Adversaries may attempt to discover containers and other resources that are available within a containers environment. Other resources may include images, deployments, pods, nodes, and other information such as the status of a cluster.These resources can be viewed within web applications such as the Kubernetes dashboard or can be queried via the Docker and Kubernetes APIs.(Citation: Docker API)(Citation: Kubernetes API) In Docker, logs may leak information about the environment, such as the environment's configuration, which services are available, and what cloud provider the victim may be utilizing. The discovery of these resources may inform an adversary's next steps in the environment, such as how to perform lateral movement and which methods to utilize for execution." }, { "ControlTitle":"MITRE ATT&CK T1583.007", @@ -11581,11 +11581,11 @@ }, { "ControlTitle":"MITRE ATT&CK T1056.001", - "ControlDescription":"Keylogging : Adversaries may log user keystrokes to intercept credentials as the user types them. Keylogging is likely to be used to acquire credentials for new access opportunities when [OS Credential Dumping](https://attack.mitre.org/techniques/T1003) efforts are not effective, and may require an adversary to intercept keystrokes on a system for a substantial period of time before credentials can be successfully captured. In order to increase the likelihood of capturing credentials quickly, an adversary may also perform actions such as clearing browser cookies to force users to reauthenticate to systems.(Citation: Talos Kimsuky Nov 2021)Keylogging is the most prevalent type of input capture, with many different ways of intercepting keystrokes.(Citation: Adventures of a Keystroke) Some methods include:* Hooking API callbacks used for processing keystrokes. 
Unlike [Credential API Hooking](https://attack.mitre.org/techniques/T1056/004), this focuses solely on API functions intended for processing keystroke data.\n* Reading raw keystroke data from the hardware buffer.\n* Windows Registry modifications.\n* Custom drivers.\n* [Modify System Image](https://attack.mitre.org/techniques/T1601) may provide adversaries with hooks into the operating system of network devices to read raw keystrokes for login sessions.(Citation: Cisco Blog Legacy Device Attacks) " + "ControlDescription":"Keylogging : Adversaries may log user keystrokes to intercept credentials as the user types them. Keylogging is likely to be used to acquire credentials for new access opportunities when [OS Credential Dumping](https://attack.mitre.org/techniques/T1003) efforts are not effective, and may require an adversary to intercept keystrokes on a system for a substantial period of time before credentials can be successfully captured. In order to increase the likelihood of capturing credentials quickly, an adversary may also perform actions such as clearing browser cookies to force users to reauthenticate to systems.(Citation: Talos Kimsuky Nov 2021)Keylogging is the most prevalent type of input capture, with many different ways of intercepting keystrokes.(Citation: Adventures of a Keystroke) Some methods include:* Hooking API callbacks used for processing keystrokes. Unlike [Credential API Hooking](https://attack.mitre.org/techniques/T1056/004), this focuses solely on API functions intended for processing keystroke data.\n* Reading raw keystroke data from the hardware buffer.\n* Windows Registry modifications.\n* Custom drivers.\n* [Modify System Image](https://attack.mitre.org/techniques/T1601) may provide adversaries with hooks into the operating system of network devices to read raw keystrokes for login sessions.(Citation: Cisco Blog Legacy Device Attacks)" }, { "ControlTitle":"MITRE ATT&CK T1222.002", - "ControlDescription":"Linux and Mac File and Directory Permissions Modification : Adversaries may modify file or directory permissions/attributes to evade access control lists (ACLs) and access protected files.(Citation: Hybrid Analysis Icacls1 June 2018)(Citation: Hybrid Analysis Icacls2 May 2018) File and directory permissions are commonly managed by ACLs configured by the file or directory owner, or users with the appropriate permissions. File and directory ACL implementations vary by platform, but generally explicitly designate which users or groups can perform which actions (read, write, execute, etc.).Most Linux and Linux-based platforms provide a standard set of permission groups (user, group, and other) and a standard set of permissions (read, write, and execute) that are applied to each group. While nuances of each platform's permissions implementation may vary, most of the platforms provide two primary commands used to manipulate file and directory ACLs: chown (short for change owner), and chmod (short for change mode).Adversarial may use these commands to make themselves the owner of files and directories or change the mode if current permissions allow it. They could subsequently lock others out of the file. 
Specific file and directory modifications may be a required step for many techniques, such as establishing Persistence via [Unix Shell Configuration Modification](https://attack.mitre.org/techniques/T1546/004) or tainting/hijacking other instrumental binary/configuration files via [Hijack Execution Flow](https://attack.mitre.org/techniques/T1574).(Citation: 20 macOS Common Tools and Techniques) " + "ControlDescription":"Linux and Mac File and Directory Permissions Modification : Adversaries may modify file or directory permissions/attributes to evade access control lists (ACLs) and access protected files.(Citation: Hybrid Analysis Icacls1 June 2018)(Citation: Hybrid Analysis Icacls2 May 2018) File and directory permissions are commonly managed by ACLs configured by the file or directory owner, or users with the appropriate permissions. File and directory ACL implementations vary by platform, but generally explicitly designate which users or groups can perform which actions (read, write, execute, etc.).Most Linux and Linux-based platforms provide a standard set of permission groups (user, group, and other) and a standard set of permissions (read, write, and execute) that are applied to each group. While nuances of each platform's permissions implementation may vary, most of the platforms provide two primary commands used to manipulate file and directory ACLs: chown (short for change owner), and chmod (short for change mode).Adversarial may use these commands to make themselves the owner of files and directories or change the mode if current permissions allow it. They could subsequently lock others out of the file. Specific file and directory modifications may be a required step for many techniques, such as establishing Persistence via [Unix Shell Configuration Modification](https://attack.mitre.org/techniques/T1546/004) or tainting/hijacking other instrumental binary/configuration files via [Hijack Execution Flow](https://attack.mitre.org/techniques/T1574).(Citation: 20 macOS Common Tools and Techniques)" }, { "ControlTitle":"MITRE ATT&CK T1110.001", @@ -11689,7 +11689,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1543", - "ControlDescription":"Create or Modify System Process : Adversaries may create or modify system-level processes to repeatedly execute malicious payloads as part of persistence. When operating systems boot up, they can start processes that perform background system functions. On Windows and Linux, these system processes are referred to as services.(Citation: TechNet Services) On macOS, launchd processes known as [Launch Daemon](https://attack.mitre.org/techniques/T1543/004) and [Launch Agent](https://attack.mitre.org/techniques/T1543/001) are run to finish system initialization and load user specific parameters.(Citation: AppleDocs Launch Agent Daemons) Adversaries may install new services, daemons, or agents that can be configured to execute at startup or a repeatable interval in order to establish persistence. Similarly, adversaries may modify existing services, daemons, or agents to achieve the same effect. Services, daemons, or agents may be created with administrator privileges but executed under root/SYSTEM privileges. Adversaries may leverage this functionality to create or modify system processes in order to escalate privileges.(Citation: OSX Malware Detection) " + "ControlDescription":"Create or Modify System Process : Adversaries may create or modify system-level processes to repeatedly execute malicious payloads as part of persistence. 
When operating systems boot up, they can start processes that perform background system functions. On Windows and Linux, these system processes are referred to as services.(Citation: TechNet Services) On macOS, launchd processes known as [Launch Daemon](https://attack.mitre.org/techniques/T1543/004) and [Launch Agent](https://attack.mitre.org/techniques/T1543/001) are run to finish system initialization and load user specific parameters.(Citation: AppleDocs Launch Agent Daemons) Adversaries may install new services, daemons, or agents that can be configured to execute at startup or a repeatable interval in order to establish persistence. Similarly, adversaries may modify existing services, daemons, or agents to achieve the same effect. Services, daemons, or agents may be created with administrator privileges but executed under root/SYSTEM privileges. Adversaries may leverage this functionality to create or modify system processes in order to escalate privileges.(Citation: OSX Malware Detection) " }, { "ControlTitle":"MITRE ATT&CK T1133", @@ -11745,7 +11745,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1583.008", - "ControlDescription":"Malvertising : Adversaries may purchase online advertisements that can be abused to distribute malware to victims. Ads can be purchased to plant as well as favorably position artifacts in specific locations online, such as prominently placed within search engine results. These ads may make it more difficult for users to distinguish between actual search results and advertisements.(Citation: spamhaus-malvertising) Purchased ads may also target specific audiences using the advertising network's capabilities, potentially further taking advantage of the trust inherently given to search engines and popular websites. Adversaries may purchase ads and other resources to help distribute artifacts containing malicious code to victims. Purchased ads may attempt to impersonate or spoof well-known brands. For example, these spoofed ads may trick victims into clicking the ad which could then send them to a malicious domain that may be a clone of official websites containing trojanized versions of the advertised software.(Citation: Masquerads-Guardio)(Citation: FBI-search) Adversary's efforts to create malicious domains and purchase advertisements may also be automated at scale to better resist cleanup efforts.(Citation: sentinelone-malvertising) Malvertising may be used to support [Drive-by Target](https://attack.mitre.org/techniques/T1608/004) and [Drive-by Compromise](https://attack.mitre.org/techniques/T1189), potentially requiring limited interaction from the user if the ad contains code/exploits that infect the target system's web browser.(Citation: BBC-malvertising)Adversaries may also employ several techniques to evade detection by the advertising network. For example, adversaries may dynamically route ad clicks to send automated crawler/policy enforcer traffic to benign sites while validating potential targets then sending victims referred from real ad clicks to malicious pages. 
This infection vector may therefore remain hidden from the ad network as well as any visitor not reaching the malicious sites with a valid identifier from clicking on the advertisement.(Citation: Masquerads-Guardio) Other tricks, such as intentional typos to avoid brand reputation monitoring, may also be used to evade automated detection.(Citation: spamhaus-malvertising) " + "ControlDescription":"Malvertising : Adversaries may purchase online advertisements that can be abused to distribute malware to victims. Ads can be purchased to plant as well as favorably position artifacts in specific locations online, such as prominently placed within search engine results. These ads may make it more difficult for users to distinguish between actual search results and advertisements.(Citation: spamhaus-malvertising) Purchased ads may also target specific audiences using the advertising network's capabilities, potentially further taking advantage of the trust inherently given to search engines and popular websites. Adversaries may purchase ads and other resources to help distribute artifacts containing malicious code to victims. Purchased ads may attempt to impersonate or spoof well-known brands. For example, these spoofed ads may trick victims into clicking the ad which could then send them to a malicious domain that may be a clone of official websites containing trojanized versions of the advertised software.(Citation: Masquerads-Guardio)(Citation: FBI-search) Adversary's efforts to create malicious domains and purchase advertisements may also be automated at scale to better resist cleanup efforts.(Citation: sentinelone-malvertising) Malvertising may be used to support [Drive-by Target](https://attack.mitre.org/techniques/T1608/004) and [Drive-by Compromise](https://attack.mitre.org/techniques/T1189), potentially requiring limited interaction from the user if the ad contains code/exploits that infect the target system's web browser.(Citation: BBC-malvertising)Adversaries may also employ several techniques to evade detection by the advertising network. For example, adversaries may dynamically route ad clicks to send automated crawler/policy enforcer traffic to benign sites while validating potential targets then sending victims referred from real ad clicks to malicious pages. This infection vector may therefore remain hidden from the ad network as well as any visitor not reaching the malicious sites with a valid identifier from clicking on the advertisement.(Citation: Masquerads-Guardio) Other tricks, such as intentional typos to avoid brand reputation monitoring, may also be used to evade automated detection.(Citation: spamhaus-malvertising)" }, { "ControlTitle":"MITRE ATT&CK T1069", @@ -11753,7 +11753,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1114", - "ControlDescription":"Email Collection : Adversaries may target user email to collect sensitive information. Emails may contain sensitive data, including trade secrets or personal information, that can prove valuable to adversaries. Adversaries can collect or forward email from mail servers or clients. " + "ControlDescription":"Email Collection : Adversaries may target user email to collect sensitive information. Emails may contain sensitive data, including trade secrets or personal information, that can prove valuable to adversaries. Adversaries can collect or forward email from mail servers or clients." 
}, { "ControlTitle":"MITRE ATT&CK T1003.002", @@ -11793,7 +11793,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1195.001", - "ControlDescription":"Compromise Software Dependencies and Development Tools : Adversaries may manipulate software dependencies and development tools prior to receipt by a final consumer for the purpose of data or system compromise. Applications often depend on external software to function properly. Popular open source projects that are used as dependencies in many applications may be targeted as a means to add malicious code to users of the dependency.(Citation: Trendmicro NPM Compromise) Targeting may be specific to a desired victim set or may be distributed to a broad set of consumers but only move on to additional tactics on specific victims. " + "ControlDescription":"Compromise Software Dependencies and Development Tools : Adversaries may manipulate software dependencies and development tools prior to receipt by a final consumer for the purpose of data or system compromise. Applications often depend on external software to function properly. Popular open source projects that are used as dependencies in many applications may be targeted as a means to add malicious code to users of the dependency.(Citation: Trendmicro NPM Compromise) Targeting may be specific to a desired victim set or may be distributed to a broad set of consumers but only move on to additional tactics on specific victims." }, { "ControlTitle":"MITRE ATT&CK T1588.004", @@ -11809,7 +11809,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1071.004", - "ControlDescription":"DNS : Adversaries may communicate using the Domain Name System (DNS) application layer protocol to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. The DNS protocol serves an administrative function in computer networking and thus may be very common in environments. DNS traffic may also be allowed even before network authentication is completed. DNS packets contain many fields and headers in which data can be concealed. Often known as DNS tunneling, adversaries may abuse DNS to communicate with systems under their control within a victim network while also mimicking normal, expected traffic.(Citation: PAN DNS Tunneling)(Citation: Medium DnsTunneling) " + "ControlDescription":"DNS : Adversaries may communicate using the Domain Name System (DNS) application layer protocol to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. The DNS protocol serves an administrative function in computer networking and thus may be very common in environments. DNS traffic may also be allowed even before network authentication is completed. DNS packets contain many fields and headers in which data can be concealed. Often known as DNS tunneling, adversaries may abuse DNS to communicate with systems under their control within a victim network while also mimicking normal, expected traffic.(Citation: PAN DNS Tunneling)(Citation: Medium DnsTunneling)" }, { "ControlTitle":"MITRE ATT&CK T1552.005", @@ -11881,7 +11881,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1555.001", - "ControlDescription":"Keychain : Adversaries may acquire credentials from Keychain. 
@@ -11881,7 +11881,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1555.001",
- "ControlDescription":"Keychain : Adversaries may acquire credentials from Keychain. Keychain (or Keychain Services) is the macOS credential management system that stores account names, passwords, private keys, certificates, sensitive application data, payment data, and secure notes. There are three types of Keychains: Login Keychain, System Keychain, and Local Items (iCloud) Keychain. The default Keychain is the Login Keychain, which stores user passwords and information. The System Keychain stores items accessed by the operating system, such as items shared among users on a host. The Local Items (iCloud) Keychain is used for items synced with Apple's iCloud service. Keychains can be viewed and edited through the Keychain Access application or using the command-line utility security. Keychain files are located in ~/Library/Keychains/, /Library/Keychains/, and /Network/Library/Keychains/.(Citation: Keychain Services Apple)(Citation: Keychain Decryption Passware)(Citation: OSX Keychain Schaumann)Adversaries may gather user credentials from Keychain storage/memory. For example, the command security dump-keychain d will dump all Login Keychain credentials from ~/Library/Keychains/login.keychain-db. Adversaries may also directly read Login Keychain credentials from the ~/Library/Keychains/login.keychain file. Both methods require a password, where the default password for the Login Keychain is the current user's password to login to the macOS host.(Citation: External to DA, the OS X Way)(Citation: Empire Keychain Decrypt) "
+ "ControlDescription":"Keychain : Adversaries may acquire credentials from Keychain. Keychain (or Keychain Services) is the macOS credential management system that stores account names, passwords, private keys, certificates, sensitive application data, payment data, and secure notes. There are three types of Keychains: Login Keychain, System Keychain, and Local Items (iCloud) Keychain. The default Keychain is the Login Keychain, which stores user passwords and information. The System Keychain stores items accessed by the operating system, such as items shared among users on a host. The Local Items (iCloud) Keychain is used for items synced with Apple's iCloud service. Keychains can be viewed and edited through the Keychain Access application or using the command-line utility security. Keychain files are located in ~/Library/Keychains/, /Library/Keychains/, and /Network/Library/Keychains/.(Citation: Keychain Services Apple)(Citation: Keychain Decryption Passware)(Citation: OSX Keychain Schaumann)Adversaries may gather user credentials from Keychain storage/memory. For example, the command security dump-keychain d will dump all Login Keychain credentials from ~/Library/Keychains/login.keychain-db. Adversaries may also directly read Login Keychain credentials from the ~/Library/Keychains/login.keychain file. Both methods require a password, where the default password for the Login Keychain is the current user's password to login to the macOS host.(Citation: External to DA, the OS X Way)(Citation: Empire Keychain Decrypt) "
 },
 {
 "ControlTitle":"MITRE ATT&CK T1547",
@@ -11933,7 +11933,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1087.002",
- "ControlDescription":"Domain Account : Adversaries may attempt to get a listing of domain accounts. This information can help adversaries determine which domain accounts exist to aid in follow-on behavior such as targeting specific accounts which possess particular privileges.Commands such as net user /domain and net group /domain of the [Net](https://attack.mitre.org/software/S0039) utility, dscacheutil -q group on macOS, and ldapsearch on Linux can list domain users and groups. [PowerShell](https://attack.mitre.org/techniques/T1059/001) cmdlets including Get-ADUser and Get-ADGroupMember may enumerate members of Active Directory groups. "
+ "ControlDescription":"Domain Account : Adversaries may attempt to get a listing of domain accounts. This information can help adversaries determine which domain accounts exist to aid in follow-on behavior such as targeting specific accounts which possess particular privileges.Commands such as net user /domain and net group /domain of the [Net](https://attack.mitre.org/software/S0039) utility, dscacheutil -q group on macOS, and ldapsearch on Linux can list domain users and groups. [PowerShell](https://attack.mitre.org/techniques/T1059/001) cmdlets including Get-ADUser and Get-ADGroupMember may enumerate members of Active Directory groups. "
 },
 {
 "ControlTitle":"MITRE ATT&CK T1547.014",
@@ -11969,7 +11969,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1484.002",
- "ControlDescription":"Domain Trust Modification : Adversaries may add new domain trusts or modify the properties of existing domain trusts to evade defenses and/or elevate privileges. Domain trust details, such as whether or not a domain is federated, allow authentication and authorization properties to apply between domains for the purpose of accessing shared resources.(Citation: Microsoft - Azure AD Federation) These trust objects may include accounts, credentials, and other authentication material applied to servers, tokens, and domains.Manipulating the domain trusts may allow an adversary to escalate privileges and/or evade defenses by modifying settings to add objects which they control. For example, this may be used to forge [SAML Tokens](https://attack.mitre.org/techniques/T1606/002), without the need to compromise the signing certificate to forge new credentials. Instead, an adversary can manipulate domain trusts to add their own signing certificate. An adversary may also convert a domain to a federated domain, which may enable malicious trust modifications such as altering the claim issuance rules to log in any valid set of credentials as a specified user.(Citation: AADInternals zure AD Federated Domain) "
+ "ControlDescription":"Domain Trust Modification : Adversaries may add new domain trusts or modify the properties of existing domain trusts to evade defenses and/or elevate privileges. Domain trust details, such as whether or not a domain is federated, allow authentication and authorization properties to apply between domains for the purpose of accessing shared resources.(Citation: Microsoft - Azure AD Federation) These trust objects may include accounts, credentials, and other authentication material applied to servers, tokens, and domains.Manipulating the domain trusts may allow an adversary to escalate privileges and/or evade defenses by modifying settings to add objects which they control. For example, this may be used to forge [SAML Tokens](https://attack.mitre.org/techniques/T1606/002), without the need to compromise the signing certificate to forge new credentials. Instead, an adversary can manipulate domain trusts to add their own signing certificate. An adversary may also convert a domain to a federated domain, which may enable malicious trust modifications such as altering the claim issuance rules to log in any valid set of credentials as a specified user.(Citation: AADInternals zure AD Federated Domain)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1573.001",
@@ -12017,7 +12017,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1053.003",
- "ControlDescription":"Cron : Adversaries may abuse the cron utility to perform task scheduling for initial or recurring execution of malicious code.(Citation: 20 macOS Common Tools and Techniques) The cron utility is a time-based job scheduler for Unix-like operating systems. The crontab file contains the schedule of cron entries to be run and the specified times for execution. Any crontab files are stored in operating system-specific file paths.An adversary may use cron in Linux or Unix environments to execute programs at system startup or on a scheduled basis for [Persistence](https://attack.mitre.org/tactics/TA0003). "
+ "ControlDescription":"Cron : Adversaries may abuse the cron utility to perform task scheduling for initial or recurring execution of malicious code.(Citation: 20 macOS Common Tools and Techniques) The cron utility is a time-based job scheduler for Unix-like operating systems. The crontab file contains the schedule of cron entries to be run and the specified times for execution. Any crontab files are stored in operating system-specific file paths.An adversary may use cron in Linux or Unix environments to execute programs at system startup or on a scheduled basis for [Persistence](https://attack.mitre.org/tactics/TA0003)."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1069.002",
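The T1053.003 hunk above covers cron-based persistence. An auditor-style spot check in the spirit of this project might flag crontab entries that execute from commonly abused world-writable locations. A minimal sketch, assuming Linux paths and a deliberately naive parser:

```python
# Naive cron audit sketch: flag entries in /etc/crontab that reference paths
# commonly abused for persistence. Parsing is deliberately crude; treat any
# hits as triage input, not verdicts.
SUSPECT_PREFIXES = ("/tmp/", "/var/tmp/", "/dev/shm/")

def flag_suspect_cron_entries(path="/etc/crontab"):
    findings = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#") and any(p in line for p in SUSPECT_PREFIXES):
                findings.append(line)
    return findings

for entry in flag_suspect_cron_entries():
    print(f"Review cron entry: {entry}")
```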
@@ -12041,7 +12041,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1499.004",
- "ControlDescription":"Application or System Exploitation : Adversaries may exploit software vulnerabilities that can cause an application or system to crash and deny availability to users. (Citation: Sucuri BIND9 August 2015) Some systems may automatically restart critical applications and services when crashes occur, but they can likely be re-exploited to cause a persistent denial of service (DoS) condition.Adversaries may exploit known or zero-day vulnerabilities to crash applications and/or systems, which may also lead to dependent applications and/or systems to be in a DoS condition. Crashed or restarted applications or systems may also have other effects such as [Data Destruction](https://attack.mitre.org/techniques/T1485), [Firmware Corruption](https://attack.mitre.org/techniques/T1495), [Service Stop](https://attack.mitre.org/techniques/T1489) etc. which may further cause a DoS condition and deny availability to critical information, applications and/or systems. "
+ "ControlDescription":"Application or System Exploitation : Adversaries may exploit software vulnerabilities that can cause an application or system to crash and deny availability to users. (Citation: Sucuri BIND9 August 2015) Some systems may automatically restart critical applications and services when crashes occur, but they can likely be re-exploited to cause a persistent denial of service (DoS) condition.Adversaries may exploit known or zero-day vulnerabilities to crash applications and/or systems, which may also lead to dependent applications and/or systems to be in a DoS condition. Crashed or restarted applications or systems may also have other effects such as [Data Destruction](https://attack.mitre.org/techniques/T1485), [Firmware Corruption](https://attack.mitre.org/techniques/T1495), [Service Stop](https://attack.mitre.org/techniques/T1489) etc. which may further cause a DoS condition and deny availability to critical information, applications and/or systems."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1137",
@@ -12077,7 +12077,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1566.001",
- "ControlDescription":"Spearphishing Attachment : Adversaries may send spearphishing emails with a malicious attachment in an attempt to gain access to victim systems. Spearphishing attachment is a specific variant of spearphishing. Spearphishing attachment is different from other forms of spearphishing in that it employs the use of malware attached to an email. All forms of spearphishing are electronically delivered social engineering targeted at a specific individual, company, or industry. In this scenario, adversaries attach a file to the spearphishing email and usually rely upon [User Execution](https://attack.mitre.org/techniques/T1204) to gain execution. Spearphishing may also involve social engineering techniques, such as posing as a trusted source.There are many options for the attachment such as Microsoft Office documents, executables, PDFs, or archived files. Upon opening the attachment (and potentially clicking past protections), the adversary's payload exploits a vulnerability or directly executes on the user's system. The text of the spearphishing email usually tries to give a plausible reason why the file should be opened, and may explain how to bypass system protections in order to do so. The email may also contain instructions on how to decrypt an attachment, such as a zip file password, in order to evade email boundary defenses. Adversaries frequently manipulate file extensions and icons in order to make attached executables appear to be document files, or files exploiting one application appear to be a file for a different one. "
+ "ControlDescription":"Spearphishing Attachment : Adversaries may send spearphishing emails with a malicious attachment in an attempt to gain access to victim systems. Spearphishing attachment is a specific variant of spearphishing. Spearphishing attachment is different from other forms of spearphishing in that it employs the use of malware attached to an email. All forms of spearphishing are electronically delivered social engineering targeted at a specific individual, company, or industry. In this scenario, adversaries attach a file to the spearphishing email and usually rely upon [User Execution](https://attack.mitre.org/techniques/T1204) to gain execution. Spearphishing may also involve social engineering techniques, such as posing as a trusted source.There are many options for the attachment such as Microsoft Office documents, executables, PDFs, or archived files. Upon opening the attachment (and potentially clicking past protections), the adversary's payload exploits a vulnerability or directly executes on the user's system. The text of the spearphishing email usually tries to give a plausible reason why the file should be opened, and may explain how to bypass system protections in order to do so. The email may also contain instructions on how to decrypt an attachment, such as a zip file password, in order to evade email boundary defenses. Adversaries frequently manipulate file extensions and icons in order to make attached executables appear to be document files, or files exploiting one application appear to be a file for a different one."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1214",
@@ -12141,7 +12141,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1553.002",
- "ControlDescription":"Code Signing : Adversaries may create, acquire, or steal code signing materials to sign their malware or tools. Code signing provides a level of authenticity on a binary from the developer and a guarantee that the binary has not been tampered with. (Citation: Wikipedia Code Signing) The certificates used during an operation may be created, acquired, or stolen by the adversary. (Citation: Securelist Digital Certificates) (Citation: Symantec Digital Certificates) Unlike [Invalid Code Signature](https://attack.mitre.org/techniques/T1036/001), this activity will result in a valid signature.Code signing to verify software on first run can be used on modern Windows and macOS systems. It is not used on Linux due to the decentralized nature of the platform. (Citation: Wikipedia Code Signing)(Citation: EclecticLightChecksonEXECodeSigning)Code signing certificates may be used to bypass security policies that require signed code to execute on a system. "
+ "ControlDescription":"Code Signing : Adversaries may create, acquire, or steal code signing materials to sign their malware or tools. Code signing provides a level of authenticity on a binary from the developer and a guarantee that the binary has not been tampered with. (Citation: Wikipedia Code Signing) The certificates used during an operation may be created, acquired, or stolen by the adversary. (Citation: Securelist Digital Certificates) (Citation: Symantec Digital Certificates) Unlike [Invalid Code Signature](https://attack.mitre.org/techniques/T1036/001), this activity will result in a valid signature.Code signing to verify software on first run can be used on modern Windows and macOS systems. It is not used on Linux due to the decentralized nature of the platform. (Citation: Wikipedia Code Signing)(Citation: EclecticLightChecksonEXECodeSigning)Code signing certificates may be used to bypass security policies that require signed code to execute on a system."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1530",
@@ -12165,7 +12165,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1036.009",
- "ControlDescription":"Break Process Trees : An adversary may attempt to evade process tree-based analysis by modifying executed malware's parent process ID (PPID). If endpoint protection software leverages the 'parent-child' relationship for detection, breaking this relationship could result in the adversary's behavior not being associated with previous process tree activity. On Unix-based systems breaking this process tree is common practice for administrators to execute software using scripts and programs.(Citation: 3OHA double-fork 2022) On Linux systems, adversaries may execute a series of [Native API](https://attack.mitre.org/techniques/T1106) calls to alter malware's process tree. For example, adversaries can execute their payload without any arguments, call the `fork()` API call twice, then have the parent process exit. This creates a grandchild process with no parent process that is immediately adopted by the `init` system process (PID 1), which successfully disconnects the execution of the adversary's payload from its previous process tree.Another example is using the 'daemon' syscall to detach from the current parent process and run in the background.(Citation: Sandfly BPFDoor 2022)(Citation: Microsoft XorDdos Linux Stealth 2022) "
+ "ControlDescription":"Break Process Trees : An adversary may attempt to evade process tree-based analysis by modifying executed malware's parent process ID (PPID). If endpoint protection software leverages the 'parent-child' relationship for detection, breaking this relationship could result in the adversary's behavior not being associated with previous process tree activity. On Unix-based systems breaking this process tree is common practice for administrators to execute software using scripts and programs.(Citation: 3OHA double-fork 2022) On Linux systems, adversaries may execute a series of [Native API](https://attack.mitre.org/techniques/T1106) calls to alter malware's process tree. For example, adversaries can execute their payload without any arguments, call the `fork()` API call twice, then have the parent process exit. This creates a grandchild process with no parent process that is immediately adopted by the `init` system process (PID 1), which successfully disconnects the execution of the adversary's payload from its previous process tree.Another example is using the 'daemon' syscall to detach from the current parent process and run in the background.(Citation: Sandfly BPFDoor 2022)(Citation: Microsoft XorDdos Linux Stealth 2022)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1590.004",
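The double-fork trick in the T1036.009 hunk leaves payloads re-parented to PID 1, so one blunt defensive heuristic is to enumerate processes whose parent is init/systemd. A Linux-only sketch using /proc; it is noisy by design, since legitimate daemons re-parent the same way:

```python
# Blunt heuristic for T1036.009-style re-parenting: list processes whose
# parent is PID 1. Legitimate daemons will appear too; treat as triage input.
import os

def processes_reparented_to_init():
    hits = []
    for pid in filter(str.isdigit, os.listdir("/proc")):
        try:
            with open(f"/proc/{pid}/stat") as f:
                # /proc/[pid]/stat: "pid (comm) state ppid ..."; split after ')'
                fields = f.read().rsplit(")", 1)[1].split()
            with open(f"/proc/{pid}/comm") as f:
                comm = f.read().strip()
        except OSError:
            continue  # process exited mid-scan
        if pid != "1" and fields[1] == "1":  # fields[1] is the PPID
            hits.append((int(pid), comm))
    return hits

print(processes_reparented_to_init())
```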
@@ -12181,11 +12181,11 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1137.006",
- "ControlDescription":"Add-ins : Adversaries may abuse Microsoft Office add-ins to obtain persistence on a compromised system. Office add-ins can be used to add functionality to Office programs. (Citation: Microsoft Office Add-ins) There are different types of add-ins that can be used by the various Office products; including Word/Excel add-in Libraries (WLL/XLL), VBA add-ins, Office Component Object Model (COM) add-ins, automation add-ins, VBA Editor (VBE), Visual Studio Tools for Office (VSTO) add-ins, and Outlook add-ins. (Citation: MRWLabs Office Persistence Add-ins)(Citation: FireEye Mail CDS 2018)Add-ins can be used to obtain persistence because they can be set to execute code when an Office application starts. "
+ "ControlDescription":"Add-ins : Adversaries may abuse Microsoft Office add-ins to obtain persistence on a compromised system. Office add-ins can be used to add functionality to Office programs. (Citation: Microsoft Office Add-ins) There are different types of add-ins that can be used by the various Office products; including Word/Excel add-in Libraries (WLL/XLL), VBA add-ins, Office Component Object Model (COM) add-ins, automation add-ins, VBA Editor (VBE), Visual Studio Tools for Office (VSTO) add-ins, and Outlook add-ins. (Citation: MRWLabs Office Persistence Add-ins)(Citation: FireEye Mail CDS 2018)Add-ins can be used to obtain persistence because they can be set to execute code when an Office application starts."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1505.002",
- "ControlDescription":"Transport Agent : Adversaries may abuse Microsoft transport agents to establish persistent access to systems. Microsoft Exchange transport agents can operate on email messages passing through the transport pipeline to perform various tasks such as filtering spam, filtering malicious attachments, journaling, or adding a corporate signature to the end of all outgoing emails.(Citation: Microsoft TransportAgent Jun 2016)(Citation: ESET LightNeuron May 2019) Transport agents can be written by application developers and then compiled to .NET assemblies that are subsequently registered with the Exchange server. Transport agents will be invoked during a specified stage of email processing and carry out developer defined tasks. Adversaries may register a malicious transport agent to provide a persistence mechanism in Exchange Server that can be triggered by adversary-specified email events.(Citation: ESET LightNeuron May 2019) Though a malicious transport agent may be invoked for all emails passing through the Exchange transport pipeline, the agent can be configured to only carry out specific tasks in response to adversary defined criteria. For example, the transport agent may only carry out an action like copying in-transit attachments and saving them for later exfiltration if the recipient email address matches an entry on a list provided by the adversary. "
+ "ControlDescription":"Transport Agent : Adversaries may abuse Microsoft transport agents to establish persistent access to systems. Microsoft Exchange transport agents can operate on email messages passing through the transport pipeline to perform various tasks such as filtering spam, filtering malicious attachments, journaling, or adding a corporate signature to the end of all outgoing emails.(Citation: Microsoft TransportAgent Jun 2016)(Citation: ESET LightNeuron May 2019) Transport agents can be written by application developers and then compiled to .NET assemblies that are subsequently registered with the Exchange server. Transport agents will be invoked during a specified stage of email processing and carry out developer defined tasks. Adversaries may register a malicious transport agent to provide a persistence mechanism in Exchange Server that can be triggered by adversary-specified email events.(Citation: ESET LightNeuron May 2019) Though a malicious transport agent may be invoked for all emails passing through the Exchange transport pipeline, the agent can be configured to only carry out specific tasks in response to adversary defined criteria. For example, the transport agent may only carry out an action like copying in-transit attachments and saving them for later exfiltration if the recipient email address matches an entry on a list provided by the adversary."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1082",
@@ -12193,7 +12193,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1071",
- "ControlDescription":"Application Layer Protocol : Adversaries may communicate using OSI application layer protocols to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Adversaries may utilize many different protocols, including those used for web browsing, transferring files, electronic mail, or DNS. For connections that occur internally within an enclave (such as those between a proxy or pivot node and other nodes), commonly used protocols are SMB, SSH, or RDP. "
+ "ControlDescription":"Application Layer Protocol : Adversaries may communicate using OSI application layer protocols to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Adversaries may utilize many different protocols, including those used for web browsing, transferring files, electronic mail, or DNS. For connections that occur internally within an enclave (such as those between a proxy or pivot node and other nodes), commonly used protocols are SMB, SSH, or RDP."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1074.002",
@@ -12353,7 +12353,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1055.003",
- "ControlDescription":"Thread Execution Hijacking : Adversaries may inject malicious code into hijacked processes in order to evade process-based defenses as well as possibly elevate privileges. Thread Execution Hijacking is a method of executing arbitrary code in the address space of a separate live process. Thread Execution Hijacking is commonly performed by suspending an existing process then unmapping/hollowing its memory, which can then be replaced with malicious code or the path to a DLL. A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point the process can be suspended then written to, realigned to the injected code, and resumed via SuspendThread , VirtualAllocEx, WriteProcessMemory, SetThreadContext, then ResumeThread respectively.(Citation: Elastic Process Injection July 2017)This is very similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012) but targets an existing process rather than creating a process in a suspended state. Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via Thread Execution Hijacking may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Thread Execution Hijacking : Adversaries may inject malicious code into hijacked processes in order to evade process-based defenses as well as possibly elevate privileges. Thread Execution Hijacking is a method of executing arbitrary code in the address space of a separate live process. Thread Execution Hijacking is commonly performed by suspending an existing process then unmapping/hollowing its memory, which can then be replaced with malicious code or the path to a DLL. A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point the process can be suspended then written to, realigned to the injected code, and resumed via SuspendThread , VirtualAllocEx, WriteProcessMemory, SetThreadContext, then ResumeThread respectively.(Citation: Elastic Process Injection July 2017)This is very similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012) but targets an existing process rather than creating a process in a suspended state. Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via Thread Execution Hijacking may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1079",
@@ -12381,7 +12381,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1037.002",
- "ControlDescription":"Login Hook : Adversaries may use a Login Hook to establish persistence executed upon user logon. A login hook is a plist file that points to a specific script to execute with root privileges upon user logon. The plist file is located in the /Library/Preferences/com.apple.loginwindow.plist file and can be modified using the defaults command-line utility. This behavior is the same for logout hooks where a script can be executed upon user logout. All hooks require administrator permissions to modify or create hooks.(Citation: Login Scripts Apple Dev)(Citation: LoginWindowScripts Apple Dev) Adversaries can add or insert a path to a malicious script in the com.apple.loginwindow.plist file, using the LoginHook or LogoutHook key-value pair. The malicious script is executed upon the next user login. If a login hook already exists, adversaries can add additional commands to an existing login hook. There can be only one login and logout hook on a system at a time.(Citation: S1 macOs Persistence)(Citation: Wardle Persistence Chapter)**Note:** Login hooks were deprecated in 10.11 version of macOS in favor of [Launch Daemon](https://attack.mitre.org/techniques/T1543/004) and [Launch Agent](https://attack.mitre.org/techniques/T1543/001) "
+ "ControlDescription":"Login Hook : Adversaries may use a Login Hook to establish persistence executed upon user logon. A login hook is a plist file that points to a specific script to execute with root privileges upon user logon. The plist file is located in the /Library/Preferences/com.apple.loginwindow.plist file and can be modified using the defaults command-line utility. This behavior is the same for logout hooks where a script can be executed upon user logout. All hooks require administrator permissions to modify or create hooks.(Citation: Login Scripts Apple Dev)(Citation: LoginWindowScripts Apple Dev) Adversaries can add or insert a path to a malicious script in the com.apple.loginwindow.plist file, using the LoginHook or LogoutHook key-value pair. The malicious script is executed upon the next user login. If a login hook already exists, adversaries can add additional commands to an existing login hook. There can be only one login and logout hook on a system at a time.(Citation: S1 macOs Persistence)(Citation: Wardle Persistence Chapter)**Note:** Login hooks were deprecated in 10.11 version of macOS in favor of [Launch Daemon](https://attack.mitre.org/techniques/T1543/004) and [Launch Agent](https://attack.mitre.org/techniques/T1543/001)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1659",
@@ -12389,7 +12389,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1055",
- "ControlDescription":"Process Injection : Adversaries may inject code into processes in order to evade process-based defenses as well as possibly elevate privileges. Process injection is a method of executing arbitrary code in the address space of a separate live process. Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via process injection may also evade detection from security products since the execution is masked under a legitimate process. There are many different ways to inject code into a process, many of which abuse legitimate functionalities. These implementations exist for every major OS but are typically platform specific. More sophisticated samples may perform multiple process injections to segment modules and further evade detection, utilizing named pipes or other inter-process communication (IPC) mechanisms as a communication channel. "
+ "ControlDescription":"Process Injection : Adversaries may inject code into processes in order to evade process-based defenses as well as possibly elevate privileges. Process injection is a method of executing arbitrary code in the address space of a separate live process. Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via process injection may also evade detection from security products since the execution is masked under a legitimate process. There are many different ways to inject code into a process, many of which abuse legitimate functionalities. These implementations exist for every major OS but are typically platform specific. More sophisticated samples may perform multiple process injections to segment modules and further evade detection, utilizing named pipes or other inter-process communication (IPC) mechanisms as a communication channel."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1567.004",
@@ -12501,7 +12501,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1572",
- "ControlDescription":"Protocol Tunneling : Adversaries may tunnel network communications to and from a victim system within a separate protocol to avoid detection/network filtering and/or enable access to otherwise unreachable systems. Tunneling involves explicitly encapsulating a protocol within another. This behavior may conceal malicious traffic by blending in with existing traffic and/or provide an outer layer of encryption (similar to a VPN). Tunneling could also enable routing of network packets that would otherwise not reach their intended destination, such as SMB, RDP, or other traffic that would be filtered by network appliances or not routed over the Internet. There are various means to encapsulate a protocol within another protocol. For example, adversaries may perform SSH tunneling (also known as SSH port forwarding), which involves forwarding arbitrary data over an encrypted SSH tunnel.(Citation: SSH Tunneling) [Protocol Tunneling](https://attack.mitre.org/techniques/T1572) may also be abused by adversaries during [Dynamic Resolution](https://attack.mitre.org/techniques/T1568). Known as DNS over HTTPS (DoH), queries to resolve C2 infrastructure may be encapsulated within encrypted HTTPS packets.(Citation: BleepingComp Godlua JUL19) Adversaries may also leverage [Protocol Tunneling](https://attack.mitre.org/techniques/T1572) in conjunction with [Proxy](https://attack.mitre.org/techniques/T1090) and/or [Protocol Impersonation](https://attack.mitre.org/techniques/T1001/003) to further conceal C2 communications and infrastructure. "
+ "ControlDescription":"Protocol Tunneling : Adversaries may tunnel network communications to and from a victim system within a separate protocol to avoid detection/network filtering and/or enable access to otherwise unreachable systems. Tunneling involves explicitly encapsulating a protocol within another. This behavior may conceal malicious traffic by blending in with existing traffic and/or provide an outer layer of encryption (similar to a VPN). Tunneling could also enable routing of network packets that would otherwise not reach their intended destination, such as SMB, RDP, or other traffic that would be filtered by network appliances or not routed over the Internet. There are various means to encapsulate a protocol within another protocol. For example, adversaries may perform SSH tunneling (also known as SSH port forwarding), which involves forwarding arbitrary data over an encrypted SSH tunnel.(Citation: SSH Tunneling) [Protocol Tunneling](https://attack.mitre.org/techniques/T1572) may also be abused by adversaries during [Dynamic Resolution](https://attack.mitre.org/techniques/T1568). Known as DNS over HTTPS (DoH), queries to resolve C2 infrastructure may be encapsulated within encrypted HTTPS packets.(Citation: BleepingComp Godlua JUL19) Adversaries may also leverage [Protocol Tunneling](https://attack.mitre.org/techniques/T1572) in conjunction with [Proxy](https://attack.mitre.org/techniques/T1090) and/or [Protocol Impersonation](https://attack.mitre.org/techniques/T1001/003) to further conceal C2 communications and infrastructure."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1218.002",
@@ -12545,7 +12545,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1602.002",
- "ControlDescription":"Network Device Configuration Dump : Adversaries may access network configuration files to collect sensitive data about the device and the network. The network configuration is a file containing parameters that determine the operation of the device. The device typically stores an in-memory copy of the configuration while operating, and a separate configuration on non-volatile storage to load after device reset. Adversaries can inspect the configuration files to reveal information about the target network and its layout, the network device and its software, or identifying legitimate accounts and credentials for later use.Adversaries can use common management tools and protocols, such as Simple Network Management Protocol (SNMP) and Smart Install (SMI), to access network configuration files.(Citation: US-CERT TA18-106A Network Infrastructure Devices 2018)(Citation: Cisco Blog Legacy Device Attacks) These tools may be used to query specific data from a configuration repository or configure the device to export the configuration for later analysis. "
+ "ControlDescription":"Network Device Configuration Dump : Adversaries may access network configuration files to collect sensitive data about the device and the network. The network configuration is a file containing parameters that determine the operation of the device. The device typically stores an in-memory copy of the configuration while operating, and a separate configuration on non-volatile storage to load after device reset. Adversaries can inspect the configuration files to reveal information about the target network and its layout, the network device and its software, or identifying legitimate accounts and credentials for later use.Adversaries can use common management tools and protocols, such as Simple Network Management Protocol (SNMP) and Smart Install (SMI), to access network configuration files.(Citation: US-CERT TA18-106A Network Infrastructure Devices 2018)(Citation: Cisco Blog Legacy Device Attacks) These tools may be used to query specific data from a configuration repository or configure the device to export the configuration for later analysis."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1589",
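For consumers of this file, the visible structure (a list of objects with `ControlTitle` and `ControlDescription` keys) suggests a simple lookup index. A hedged sketch; the path and function names are illustrative, and it assumes titles are unique:

```python
# Sketch of how a consumer might resolve a ControlTitle such as
# "MITRE ATT&CK T1572" to its ControlDescription. Path illustrative;
# assumes ControlTitle values are unique in the file.
import json

def load_control_index(path="attack_controls.json"):
    with open(path, encoding="utf-8") as f:
        controls = json.load(f)
    return {c["ControlTitle"]: c["ControlDescription"] for c in controls}

index = load_control_index()
print(index["MITRE ATT&CK T1572"][:80])  # first 80 chars of the tunneling text
```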
@@ -12589,7 +12589,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1071.003",
- "ControlDescription":"Mail Protocols : Adversaries may communicate using application layer protocols associated with electronic mail delivery to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as SMTP/S, POP3/S, and IMAP that carry electronic mail may be very common in environments. Packets produced from these protocols may have many fields and headers in which data can be concealed. Data could also be concealed within the email messages themselves. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic. "
+ "ControlDescription":"Mail Protocols : Adversaries may communicate using application layer protocols associated with electronic mail delivery to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as SMTP/S, POP3/S, and IMAP that carry electronic mail may be very common in environments. Packets produced from these protocols may have many fields and headers in which data can be concealed. Data could also be concealed within the email messages themselves. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1556.007",
@@ -12601,7 +12601,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1059.009",
- "ControlDescription":"Cloud API : Adversaries may abuse cloud APIs to execute malicious commands. APIs available in cloud environments provide various functionalities and are a feature-rich method for programmatic access to nearly all aspects of a tenant. These APIs may be utilized through various methods such as command line interpreters (CLIs), in-browser Cloud Shells, [PowerShell](https://attack.mitre.org/techniques/T1059/001) modules like Azure for PowerShell(Citation: Microsoft - Azure PowerShell), or software developer kits (SDKs) available for languages such as [Python](https://attack.mitre.org/techniques/T1059/006). Cloud API functionality may allow for administrative access across all major services in a tenant such as compute, storage, identity and access management (IAM), networking, and security policies.With proper permissions (often via use of credentials such as [Application Access Token](https://attack.mitre.org/techniques/T1550/001) and [Web Session Cookie](https://attack.mitre.org/techniques/T1550/004)), adversaries may abuse cloud APIs to invoke various functions that execute malicious actions. For example, CLI and PowerShell functionality may be accessed through binaries installed on cloud-hosted or on-premises hosts or accessed through a browser-based cloud shell offered by many cloud platforms (such as AWS, Azure, and GCP). These cloud shells are often a packaged unified environment to use CLI and/or scripting modules hosted as a container in the cloud environment. "
+ "ControlDescription":"Cloud API : Adversaries may abuse cloud APIs to execute malicious commands. APIs available in cloud environments provide various functionalities and are a feature-rich method for programmatic access to nearly all aspects of a tenant. These APIs may be utilized through various methods such as command line interpreters (CLIs), in-browser Cloud Shells, [PowerShell](https://attack.mitre.org/techniques/T1059/001) modules like Azure for PowerShell(Citation: Microsoft - Azure PowerShell), or software developer kits (SDKs) available for languages such as [Python](https://attack.mitre.org/techniques/T1059/006). Cloud API functionality may allow for administrative access across all major services in a tenant such as compute, storage, identity and access management (IAM), networking, and security policies.With proper permissions (often via use of credentials such as [Application Access Token](https://attack.mitre.org/techniques/T1550/001) and [Web Session Cookie](https://attack.mitre.org/techniques/T1550/004)), adversaries may abuse cloud APIs to invoke various functions that execute malicious actions. For example, CLI and PowerShell functionality may be accessed through binaries installed on cloud-hosted or on-premises hosts or accessed through a browser-based cloud shell offered by many cloud platforms (such as AWS, Azure, and GCP). These cloud shells are often a packaged unified environment to use CLI and/or scripting modules hosted as a container in the cloud environment. "
 },
 {
 "ControlTitle":"MITRE ATT&CK T1596",
@@ -12661,7 +12661,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1557.003",
- "ControlDescription":"DHCP Spoofing : Adversaries may redirect network traffic to adversary-owned systems by spoofing Dynamic Host Configuration Protocol (DHCP) traffic and acting as a malicious DHCP server on the victim network. By achieving the adversary-in-the-middle (AiTM) position, adversaries may collect network communications, including passed credentials, especially those sent over insecure, unencrypted protocols. This may also enable follow-on behaviors such as [Network Sniffing](https://attack.mitre.org/techniques/T1040) or [Transmitted Data Manipulation](https://attack.mitre.org/techniques/T1565/002).DHCP is based on a client-server model and has two functionalities: a protocol for providing network configuration settings from a DHCP server to a client and a mechanism for allocating network addresses to clients.(Citation: rfc2131) The typical server-client interaction is as follows: 1. The client broadcasts a `DISCOVER` message.2. The server responds with an `OFFER` message, which includes an available network address. 3. The client broadcasts a `REQUEST` message, which includes the network address offered. 4. The server acknowledges with an `ACK` message and the client receives the network configuration parameters.Adversaries may spoof as a rogue DHCP server on the victim network, from which legitimate hosts may receive malicious network configurations. For example, malware can act as a DHCP server and provide adversary-owned DNS servers to the victimized computers.(Citation: new_rogue_DHCP_serv_malware)(Citation: w32.tidserv.g) Through the malicious network configurations, an adversary may achieve the AiTM position, route client traffic through adversary-controlled systems, and collect information from the client network.DHCPv6 clients can receive network configuration information without being assigned an IP address by sending a INFORMATION-REQUEST (code 11) message to the All_DHCP_Relay_Agents_and_Servers multicast address.(Citation: rfc3315) Adversaries may use their rogue DHCP server to respond to this request message with malicious network configurations.Rather than establishing an AiTM position, adversaries may also abuse DHCP spoofing to perform a DHCP exhaustion attack (i.e, [Service Exhaustion Flood](https://attack.mitre.org/techniques/T1499/002)) by generating many broadcast DISCOVER messages to exhaust a network's DHCP allocation pool. "
+ "ControlDescription":"DHCP Spoofing : Adversaries may redirect network traffic to adversary-owned systems by spoofing Dynamic Host Configuration Protocol (DHCP) traffic and acting as a malicious DHCP server on the victim network. By achieving the adversary-in-the-middle (AiTM) position, adversaries may collect network communications, including passed credentials, especially those sent over insecure, unencrypted protocols. This may also enable follow-on behaviors such as [Network Sniffing](https://attack.mitre.org/techniques/T1040) or [Transmitted Data Manipulation](https://attack.mitre.org/techniques/T1565/002).DHCP is based on a client-server model and has two functionalities: a protocol for providing network configuration settings from a DHCP server to a client and a mechanism for allocating network addresses to clients.(Citation: rfc2131) The typical server-client interaction is as follows: 1. The client broadcasts a `DISCOVER` message.2. The server responds with an `OFFER` message, which includes an available network address. 3. The client broadcasts a `REQUEST` message, which includes the network address offered. 4. The server acknowledges with an `ACK` message and the client receives the network configuration parameters.Adversaries may spoof as a rogue DHCP server on the victim network, from which legitimate hosts may receive malicious network configurations. For example, malware can act as a DHCP server and provide adversary-owned DNS servers to the victimized computers.(Citation: new_rogue_DHCP_serv_malware)(Citation: w32.tidserv.g) Through the malicious network configurations, an adversary may achieve the AiTM position, route client traffic through adversary-controlled systems, and collect information from the client network.DHCPv6 clients can receive network configuration information without being assigned an IP address by sending a INFORMATION-REQUEST (code 11) message to the All_DHCP_Relay_Agents_and_Servers multicast address.(Citation: rfc3315) Adversaries may use their rogue DHCP server to respond to this request message with malicious network configurations.Rather than establishing an AiTM position, adversaries may also abuse DHCP spoofing to perform a DHCP exhaustion attack (i.e, [Service Exhaustion Flood](https://attack.mitre.org/techniques/T1499/002)) by generating many broadcast DISCOVER messages to exhaust a network's DHCP allocation pool."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1155",
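The T1557.003 hunk spells out the DISCOVER/OFFER/REQUEST/ACK exchange, and a common defensive counterpart is to watch for OFFERs from servers you do not recognize. A hedged sketch using scapy (requires packet-capture privileges; the allowlist is an assumption about your network, not anything from this repository):

```python
# Defensive sketch: flag DHCP OFFERs from servers outside an allowlist, one
# way to surface the rogue-DHCP-server behavior described above.
from scapy.all import sniff
from scapy.layers.dhcp import DHCP
from scapy.layers.inet import IP

KNOWN_DHCP_SERVERS = {"10.0.0.1"}  # assumption: your legitimate server(s)

def check_offer(pkt):
    # DHCP message-type option value 2 == OFFER
    if pkt.haslayer(DHCP) and ("message-type", 2) in pkt[DHCP].options:
        src = pkt[IP].src
        if src not in KNOWN_DHCP_SERVERS:
            print(f"Possible rogue DHCP server offering leases: {src}")

sniff(filter="udp and (port 67 or port 68)", prn=check_offer, store=False)
```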
@@ -12673,7 +12673,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1027.001",
- "ControlDescription":"Binary Padding : Adversaries may use binary padding to add junk data and change the on-disk representation of malware. This can be done without affecting the functionality or behavior of a binary, but can increase the size of the binary beyond what some security tools are capable of handling due to file size limitations. Binary padding effectively changes the checksum of the file and can also be used to avoid hash-based blocklists and static anti-virus signatures.(Citation: ESET OceanLotus) The padding used is commonly generated by a function to create junk data and then appended to the end or applied to sections of malware.(Citation: Securelist Malware Tricks April 2017) Increasing the file size may decrease the effectiveness of certain tools and detection capabilities that are not designed or configured to scan large files. This may also reduce the likelihood of being collected for analysis. Public file scanning services, such as VirusTotal, limits the maximum size of an uploaded file to be analyzed.(Citation: VirusTotal FAQ) "
+ "ControlDescription":"Binary Padding : Adversaries may use binary padding to add junk data and change the on-disk representation of malware. This can be done without affecting the functionality or behavior of a binary, but can increase the size of the binary beyond what some security tools are capable of handling due to file size limitations. Binary padding effectively changes the checksum of the file and can also be used to avoid hash-based blocklists and static anti-virus signatures.(Citation: ESET OceanLotus) The padding used is commonly generated by a function to create junk data and then appended to the end or applied to sections of malware.(Citation: Securelist Malware Tricks April 2017) Increasing the file size may decrease the effectiveness of certain tools and detection capabilities that are not designed or configured to scan large files. This may also reduce the likelihood of being collected for analysis. Public file scanning services, such as VirusTotal, limits the maximum size of an uploaded file to be analyzed.(Citation: VirusTotal FAQ)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1505.003",
@@ -12729,7 +12729,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1574.006",
- "ControlDescription":"Dynamic Linker Hijacking : Adversaries may execute their own malicious payloads by hijacking environment variables the dynamic linker uses to load shared libraries. During the execution preparation phase of a program, the dynamic linker loads specified absolute paths of shared libraries from environment variables and files, such as LD_PRELOAD on Linux or DYLD_INSERT_LIBRARIES on macOS. Libraries specified in environment variables are loaded first, taking precedence over system libraries with the same function name.(Citation: Man LD.SO)(Citation: TLDP Shared Libraries)(Citation: Apple Doco Archive Dynamic Libraries) These variables are often used by developers to debug binaries without needing to recompile, deconflict mapped symbols, and implement custom functions without changing the original library.(Citation: Baeldung LD_PRELOAD)On Linux and macOS, hijacking dynamic linker variables may grant access to the victim process's memory, system/network resources, and possibly elevated privileges. This method may also evade detection from security products since the execution is masked under a legitimate process. Adversaries can set environment variables via the command line using the export command, setenv function, or putenv function. Adversaries can also leverage [Dynamic Linker Hijacking](https://attack.mitre.org/techniques/T1574/006) to export variables in a shell or set variables programmatically using higher level syntax such Python's os.environ.On Linux, adversaries may set LD_PRELOAD to point to malicious libraries that match the name of legitimate libraries which are requested by a victim program, causing the operating system to load the adversary's malicious code upon execution of the victim program. LD_PRELOAD can be set via the environment variable or /etc/ld.so.preload file.(Citation: Man LD.SO)(Citation: TLDP Shared Libraries) Libraries specified by LD_PRELOAD are loaded and mapped into memory by dlopen() and mmap() respectively.(Citation: Code Injection on Linux and macOS)(Citation: Uninformed Needle) (Citation: Phrack halfdead 1997)(Citation: Brown Exploiting Linkers) On macOS this behavior is conceptually the same as on Linux, differing only in how the macOS dynamic libraries (dyld) is implemented at a lower level. Adversaries can set the DYLD_INSERT_LIBRARIES environment variable to point to malicious libraries containing names of legitimate libraries or functions requested by a victim program.(Citation: TheEvilBit DYLD_INSERT_LIBRARIES)(Citation: Timac DYLD_INSERT_LIBRARIES)(Citation: Gabilondo DYLD_INSERT_LIBRARIES Catalina Bypass) "
+ "ControlDescription":"Dynamic Linker Hijacking : Adversaries may execute their own malicious payloads by hijacking environment variables the dynamic linker uses to load shared libraries. During the execution preparation phase of a program, the dynamic linker loads specified absolute paths of shared libraries from environment variables and files, such as LD_PRELOAD on Linux or DYLD_INSERT_LIBRARIES on macOS. Libraries specified in environment variables are loaded first, taking precedence over system libraries with the same function name.(Citation: Man LD.SO)(Citation: TLDP Shared Libraries)(Citation: Apple Doco Archive Dynamic Libraries) These variables are often used by developers to debug binaries without needing to recompile, deconflict mapped symbols, and implement custom functions without changing the original library.(Citation: Baeldung LD_PRELOAD)On Linux and macOS, hijacking dynamic linker variables may grant access to the victim process's memory, system/network resources, and possibly elevated privileges. This method may also evade detection from security products since the execution is masked under a legitimate process. Adversaries can set environment variables via the command line using the export command, setenv function, or putenv function. Adversaries can also leverage [Dynamic Linker Hijacking](https://attack.mitre.org/techniques/T1574/006) to export variables in a shell or set variables programmatically using higher level syntax such Python's os.environ.On Linux, adversaries may set LD_PRELOAD to point to malicious libraries that match the name of legitimate libraries which are requested by a victim program, causing the operating system to load the adversary's malicious code upon execution of the victim program. LD_PRELOAD can be set via the environment variable or /etc/ld.so.preload file.(Citation: Man LD.SO)(Citation: TLDP Shared Libraries) Libraries specified by LD_PRELOAD are loaded and mapped into memory by dlopen() and mmap() respectively.(Citation: Code Injection on Linux and macOS)(Citation: Uninformed Needle) (Citation: Phrack halfdead 1997)(Citation: Brown Exploiting Linkers) On macOS this behavior is conceptually the same as on Linux, differing only in how the macOS dynamic libraries (dyld) is implemented at a lower level. Adversaries can set the DYLD_INSERT_LIBRARIES environment variable to point to malicious libraries containing names of legitimate libraries or functions requested by a victim program.(Citation: TheEvilBit DYLD_INSERT_LIBRARIES)(Citation: Timac DYLD_INSERT_LIBRARIES)(Citation: Gabilondo DYLD_INSERT_LIBRARIES Catalina Bypass)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1136.001",
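The T1574.006 hunk names the two standard Linux preload mechanisms, and both are cheap to audit. A Linux-only sketch that reports /etc/ld.so.preload contents and any LD_PRELOAD variables visible in process environments; interpreting the findings is left to the analyst:

```python
# Audit-style sketch for T1574.006: report /etc/ld.so.preload entries and
# LD_PRELOAD variables in process environments. Hits are not verdicts;
# preloading has legitimate uses (debugging, instrumentation).
import os

def check_ld_preload():
    findings = []
    if os.path.exists("/etc/ld.so.preload"):
        with open("/etc/ld.so.preload") as f:
            entries = [line.strip() for line in f if line.strip()]
        if entries:
            findings.append(("ld.so.preload", entries))
    for pid in filter(str.isdigit, os.listdir("/proc")):
        try:
            with open(f"/proc/{pid}/environ", "rb") as f:
                environ = f.read().split(b"\0")
        except OSError:
            continue  # permission denied or process gone
        for var in environ:
            if var.startswith(b"LD_PRELOAD="):
                findings.append((f"pid {pid}", var.decode(errors="replace")))
    return findings

for where, what in check_ld_preload():
    print(where, what)
```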
 {
 "ControlTitle":"MITRE ATT&CK T1136.001",
@@ -12753,7 +12753,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1222",
- "ControlDescription":"File and Directory Permissions Modification : Adversaries may modify file or directory permissions/attributes to evade access control lists (ACLs) and access protected files.(Citation: Hybrid Analysis Icacls1 June 2018)(Citation: Hybrid Analysis Icacls2 May 2018) File and directory permissions are commonly managed by ACLs configured by the file or directory owner, or users with the appropriate permissions. File and directory ACL implementations vary by platform, but generally explicitly designate which users or groups can perform which actions (read, write, execute, etc.).Modifications may include changing specific access rights, which may require taking ownership of a file or directory and/or elevated permissions depending on the file or directory's existing permissions. This may enable malicious activity such as modifying, replacing, or deleting specific files or directories. Specific file and directory modifications may be a required step for many techniques, such as establishing Persistence via [Accessibility Features](https://attack.mitre.org/techniques/T1546/008), [Boot or Logon Initialization Scripts](https://attack.mitre.org/techniques/T1037), [Unix Shell Configuration Modification](https://attack.mitre.org/techniques/T1546/004), or tainting/hijacking other instrumental binary/configuration files via [Hijack Execution Flow](https://attack.mitre.org/techniques/T1574).Adversaries may also change permissions of symbolic links. For example, malware (particularly ransomware) may modify symbolic links and associated settings to enable access to files from local shortcuts with remote paths.(Citation: new_rust_based_ransomware)(Citation: bad_luck_blackcat)(Citation: falconoverwatch_blackcat_attack)(Citation: blackmatter_blackcat)(Citation: fsutil_behavior) "
+ "ControlDescription":"File and Directory Permissions Modification : Adversaries may modify file or directory permissions/attributes to evade access control lists (ACLs) and access protected files.(Citation: Hybrid Analysis Icacls1 June 2018)(Citation: Hybrid Analysis Icacls2 May 2018) File and directory permissions are commonly managed by ACLs configured by the file or directory owner, or users with the appropriate permissions. File and directory ACL implementations vary by platform, but generally explicitly designate which users or groups can perform which actions (read, write, execute, etc.).Modifications may include changing specific access rights, which may require taking ownership of a file or directory and/or elevated permissions depending on the file or directory's existing permissions. This may enable malicious activity such as modifying, replacing, or deleting specific files or directories. Specific file and directory modifications may be a required step for many techniques, such as establishing Persistence via [Accessibility Features](https://attack.mitre.org/techniques/T1546/008), [Boot or Logon Initialization Scripts](https://attack.mitre.org/techniques/T1037), [Unix Shell Configuration Modification](https://attack.mitre.org/techniques/T1546/004), or tainting/hijacking other instrumental binary/configuration files via [Hijack Execution Flow](https://attack.mitre.org/techniques/T1574).Adversaries may also change permissions of symbolic links. For example, malware (particularly ransomware) may modify symbolic links and associated settings to enable access to files from local shortcuts with remote paths.(Citation: new_rust_based_ransomware)(Citation: bad_luck_blackcat)(Citation: falconoverwatch_blackcat_attack)(Citation: blackmatter_blackcat)(Citation: fsutil_behavior)"
 },
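Reviewer aid: one simple, benign signal of the permission tampering T1222 describes is a file that has become world-writable. A minimal, read-only POSIX sketch (our own example, not ElectricEye code):

```python
import os
import stat

# Walk a tree and flag regular files that anyone can write to. Read-only.
def find_world_writable(root: str) -> list:
    flagged = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                mode = os.lstat(path).st_mode
            except OSError:
                continue  # unreadable or vanished file; skip it
            if stat.S_ISREG(mode) and mode & stat.S_IWOTH:
                flagged.append(path)
    return flagged

if __name__ == "__main__":
    for path in find_world_writable("/etc"):
        print("[!] world-writable:", path)
```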
 {
 "ControlTitle":"MITRE ATT&CK T1003.001",
@@ -12829,7 +12829,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1054",
- "ControlDescription":"Indicator Blocking : An adversary may attempt to block indicators or events typically captured by sensors from being gathered and analyzed. This could include maliciously redirecting (Citation: Microsoft Lamin Sept 2017) or even disabling host-based sensors, such as Event Tracing for Windows (ETW),(Citation: Microsoft About Event Tracing 2018) by tampering settings that control the collection and flow of event telemetry. (Citation: Medium Event Tracing Tampering 2018) These settings may be stored on the system in configuration files and/or in the Registry as well as being accessible via administrative utilities such as [PowerShell](https://attack.mitre.org/techniques/T1086) or [Windows Management Instrumentation](https://attack.mitre.org/techniques/T1047).ETW interruption can be achieved multiple ways, however most directly by defining conditions using the PowerShell Set-EtwTraceProvider cmdlet or by interfacing directly with the registry to make alterations.In the case of network-based reporting of indicators, an adversary may block traffic associated with reporting to prevent central analysis. This may be accomplished by many means, such as stopping a local process responsible for forwarding telemetry and/or creating a host-based firewall rule to block traffic to specific hosts responsible for aggregating events, such as security information and event management (SIEM) products. "
+ "ControlDescription":"Indicator Blocking : An adversary may attempt to block indicators or events typically captured by sensors from being gathered and analyzed. This could include maliciously redirecting (Citation: Microsoft Lamin Sept 2017) or even disabling host-based sensors, such as Event Tracing for Windows (ETW),(Citation: Microsoft About Event Tracing 2018) by tampering settings that control the collection and flow of event telemetry. (Citation: Medium Event Tracing Tampering 2018) These settings may be stored on the system in configuration files and/or in the Registry as well as being accessible via administrative utilities such as [PowerShell](https://attack.mitre.org/techniques/T1086) or [Windows Management Instrumentation](https://attack.mitre.org/techniques/T1047).ETW interruption can be achieved multiple ways, however most directly by defining conditions using the PowerShell Set-EtwTraceProvider cmdlet or by interfacing directly with the registry to make alterations.In the case of network-based reporting of indicators, an adversary may block traffic associated with reporting to prevent central analysis. This may be accomplished by many means, such as stopping a local process responsible for forwarding telemetry and/or creating a host-based firewall rule to block traffic to specific hosts responsible for aggregating events, such as security information and event management (SIEM) products."
 },
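Reviewer aid: the ETW settings T1054 says adversaries tamper with live under the Autologger registry key. A hedged, read-only Windows sketch (our own example; the exact values present vary by session and Windows build):

```python
import winreg

# Enumerate ETW autologger sessions and print their "Start" values so a
# defender can baseline them. Read-only; Windows only; not ElectricEye code.
AUTOLOGGER = r"SYSTEM\CurrentControlSet\Control\WMI\Autologger"

def list_autologgers():
    with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, AUTOLOGGER) as root:
        index = 0
        while True:
            try:
                session = winreg.EnumKey(root, index)
            except OSError:
                break  # no more subkeys
            index += 1
            with winreg.OpenKey(root, session) as key:
                try:
                    start, _ = winreg.QueryValueEx(key, "Start")
                except OSError:
                    start = None  # some sessions omit the value
            print(f"{session}: Start={start}")

if __name__ == "__main__":
    list_autologgers()
```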
 {
 "ControlTitle":"MITRE ATT&CK T1598.004",
@@ -12901,11 +12901,11 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1055.013",
- "ControlDescription":"Process Doppelg\u00e4nging : Adversaries may inject malicious code into process via process doppelg\u00e4nging in order to evade process-based defenses as well as possibly elevate privileges. Process doppelg\u00e4nging is a method of executing arbitrary code in the address space of a separate live process. Windows Transactional NTFS (TxF) was introduced in Vista as a method to perform safe file operations. (Citation: Microsoft TxF) To ensure data integrity, TxF enables only one transacted handle to write to a file at a given time. Until the write handle transaction is terminated, all other handles are isolated from the writer and may only read the committed version of the file that existed at the time the handle was opened. (Citation: Microsoft Basic TxF Concepts) To avoid corruption, TxF performs an automatic rollback if the system or application fails during a write transaction. (Citation: Microsoft Where to use TxF)Although deprecated, the TxF application programming interface (API) is still enabled as of Windows 10. (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017)Adversaries may abuse TxF to a perform a file-less variation of [Process Injection](https://attack.mitre.org/techniques/T1055). Similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012), process doppelg\u00e4nging involves replacing the memory of a legitimate process, enabling the veiled execution of malicious code that may evade defenses and detection. Process doppelg\u00e4nging's use of TxF also avoids the use of highly-monitored API functions such as NtUnmapViewOfSection, VirtualProtectEx, and SetThreadContext. (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017)Process Doppelg\u00e4nging is implemented in 4 steps (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017):* Transact Create a TxF transaction using a legitimate executable then overwrite the file with malicious code. These changes will be isolated and only visible within the context of the transaction.\n* Load Create a shared section of memory and load the malicious executable.\n* Rollback Undo changes to original executable, effectively removing malicious code from the file system.\n* Animate Create a process from the tainted section of memory and initiate execution.This behavior will likely not result in elevated privileges since the injected process was spawned from (and thus inherits the security context) of the injecting process. However, execution via process doppelg\u00e4nging may evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Process Doppelg\u00e4nging : Adversaries may inject malicious code into process via process doppelg\u00e4nging in order to evade process-based defenses as well as possibly elevate privileges. Process doppelg\u00e4nging is a method of executing arbitrary code in the address space of a separate live process. Windows Transactional NTFS (TxF) was introduced in Vista as a method to perform safe file operations. (Citation: Microsoft TxF) To ensure data integrity, TxF enables only one transacted handle to write to a file at a given time. Until the write handle transaction is terminated, all other handles are isolated from the writer and may only read the committed version of the file that existed at the time the handle was opened. (Citation: Microsoft Basic TxF Concepts) To avoid corruption, TxF performs an automatic rollback if the system or application fails during a write transaction. (Citation: Microsoft Where to use TxF)Although deprecated, the TxF application programming interface (API) is still enabled as of Windows 10. (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017)Adversaries may abuse TxF to a perform a file-less variation of [Process Injection](https://attack.mitre.org/techniques/T1055). Similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012), process doppelg\u00e4nging involves replacing the memory of a legitimate process, enabling the veiled execution of malicious code that may evade defenses and detection. Process doppelg\u00e4nging's use of TxF also avoids the use of highly-monitored API functions such as NtUnmapViewOfSection, VirtualProtectEx, and SetThreadContext. (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017)Process Doppelg\u00e4nging is implemented in 4 steps (Citation: BlackHat Process Doppelg\u00e4nging Dec 2017):* Transact Create a TxF transaction using a legitimate executable then overwrite the file with malicious code. These changes will be isolated and only visible within the context of the transaction.\n* Load Create a shared section of memory and load the malicious executable.\n* Rollback Undo changes to original executable, effectively removing malicious code from the file system.\n* Animate Create a process from the tainted section of memory and initiate execution.This behavior will likely not result in elevated privileges since the injected process was spawned from (and thus inherits the security context) of the injecting process. However, execution via process doppelg\u00e4nging may evade detection from security products since the execution is masked under a legitimate process."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1016",
- "ControlDescription":"System Network Configuration Discovery : Adversaries may look for details about the network configuration and settings, such as IP and/or MAC addresses, of systems they access or through information discovery of remote systems. Several operating system administration utilities exist that can be used to gather this information. Examples include [Arp](https://attack.mitre.org/software/S0099), [ipconfig](https://attack.mitre.org/software/S0100)/[ifconfig](https://attack.mitre.org/software/S0101), [nbtstat](https://attack.mitre.org/software/S0102), and [route](https://attack.mitre.org/software/S0103).Adversaries may also leverage a [Network Device CLI](https://attack.mitre.org/techniques/T1059/008) on network devices to gather information about configurations and settings, such as IP addresses of configured interfaces and static/dynamic routes (e.g. show ip route, show ip interface).(Citation: US-CERT-TA18-106A)(Citation: Mandiant APT41 Global Intrusion )Adversaries may use the information from [System Network Configuration Discovery](https://attack.mitre.org/techniques/T1016) during automated discovery to shape follow-on behaviors, including determining certain access within the target network and what actions to do next. "
+ "ControlDescription":"System Network Configuration Discovery : Adversaries may look for details about the network configuration and settings, such as IP and/or MAC addresses, of systems they access or through information discovery of remote systems. Several operating system administration utilities exist that can be used to gather this information. Examples include [Arp](https://attack.mitre.org/software/S0099), [ipconfig](https://attack.mitre.org/software/S0100)/[ifconfig](https://attack.mitre.org/software/S0101), [nbtstat](https://attack.mitre.org/software/S0102), and [route](https://attack.mitre.org/software/S0103).Adversaries may also leverage a [Network Device CLI](https://attack.mitre.org/techniques/T1059/008) on network devices to gather information about configurations and settings, such as IP addresses of configured interfaces and static/dynamic routes (e.g. show ip route, show ip interface).(Citation: US-CERT-TA18-106A)(Citation: Mandiant APT41 Global Intrusion )Adversaries may use the information from [System Network Configuration Discovery](https://attack.mitre.org/techniques/T1016) during automated discovery to shape follow-on behaviors, including determining certain access within the target network and what actions to do next."
 },
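Reviewer aid: the local-discovery surface T1016 describes is easy to baseline. A benign sketch, assuming the third-party psutil package is available (it is not part of the ATT&CK data above, and this is not ElectricEye code):

```python
import psutil  # third-party; assumed installed for this illustration

# Enumerate interfaces and their addresses, the same data ipconfig/ifconfig
# would return, so defenders can snapshot and diff it over time.
def snapshot_network_config() -> dict:
    snapshot = {}
    for interface, addresses in psutil.net_if_addrs().items():
        snapshot[interface] = [addr.address for addr in addresses]
    return snapshot

if __name__ == "__main__":
    for interface, addresses in snapshot_network_config().items():
        print(interface, "->", ", ".join(addresses))
```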
 {
 "ControlTitle":"MITRE ATT&CK T1578.003",
@@ -12913,7 +12913,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1593.003",
- "ControlDescription":"Code Repositories : Adversaries may search public code repositories for information about victims that can be used during targeting. Victims may store code in repositories on various third-party websites such as GitHub, GitLab, SourceForge, and BitBucket. Users typically interact with code repositories through a web application or command-line utilities such as git. Adversaries may search various public code repositories for various information about a victim. Public code repositories can often be a source of various general information about victims, such as commonly used programming languages and libraries as well as the names of employees. Adversaries may also identify more sensitive data, including accidentally leaked credentials or API keys.(Citation: GitHub Cloud Service Credentials) Information from these sources may reveal opportunities for other forms of reconnaissance (ex: [Phishing for Information](https://attack.mitre.org/techniques/T1598)), establishing operational resources (ex: [Compromise Accounts](https://attack.mitre.org/techniques/T1586) or [Compromise Infrastructure](https://attack.mitre.org/techniques/T1584)), and/or initial access (ex: [Valid Accounts](https://attack.mitre.org/techniques/T1078) or [Phishing](https://attack.mitre.org/techniques/T1566)). **Note:** This is distinct from [Code Repositories](https://attack.mitre.org/techniques/T1213/003), which focuses on [Collection](https://attack.mitre.org/tactics/TA0009) from private and internally hosted code repositories. "
+ "ControlDescription":"Code Repositories : Adversaries may search public code repositories for information about victims that can be used during targeting. Victims may store code in repositories on various third-party websites such as GitHub, GitLab, SourceForge, and BitBucket. Users typically interact with code repositories through a web application or command-line utilities such as git. Adversaries may search various public code repositories for various information about a victim. Public code repositories can often be a source of various general information about victims, such as commonly used programming languages and libraries as well as the names of employees. Adversaries may also identify more sensitive data, including accidentally leaked credentials or API keys.(Citation: GitHub Cloud Service Credentials) Information from these sources may reveal opportunities for other forms of reconnaissance (ex: [Phishing for Information](https://attack.mitre.org/techniques/T1598)), establishing operational resources (ex: [Compromise Accounts](https://attack.mitre.org/techniques/T1586) or [Compromise Infrastructure](https://attack.mitre.org/techniques/T1584)), and/or initial access (ex: [Valid Accounts](https://attack.mitre.org/techniques/T1078) or [Phishing](https://attack.mitre.org/techniques/T1566)). **Note:** This is distinct from [Code Repositories](https://attack.mitre.org/techniques/T1213/003), which focuses on [Collection](https://attack.mitre.org/tactics/TA0009) from private and internally hosted code repositories."
 },
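Reviewer aid: the leaked-credential risk T1593.003 calls out can be self-audited before an adversary finds it. A minimal sketch of our own (the AKIA pattern below matches AWS's documented access key ID format; real scanners cover many more patterns):

```python
import os
import re

# Scan a local checkout of your own repository for AWS-style access key IDs.
# Illustrative only; not ElectricEye code.
AWS_KEY_ID = re.compile(r"\bAKIA[0-9A-Z]{16}\b")

def scan_repo(root: str):
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d != ".git"]  # skip git metadata
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                with open(path, encoding="utf-8", errors="ignore") as f:
                    text = f.read()
            except OSError:
                continue
            for match in AWS_KEY_ID.finditer(text):
                print(f"[!] possible access key ID in {path}: {match.group()}")

if __name__ == "__main__":
    scan_repo(".")
```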
" + "ControlDescription":"Exfiltration Over Symmetric Encrypted Non-C2 Protocol : Adversaries may steal data by exfiltrating it over a symmetrically encrypted network protocol other than that of the existing command and control channel. The data may also be sent to an alternate network location from the main command and control server. Symmetric encryption algorithms are those that use shared or the same keys/secrets on each end of the channel. This requires an exchange or pre-arranged agreement/possession of the value used to encrypt and decrypt data. Network protocols that use asymmetric encryption often utilize symmetric encryption once keys are exchanged, but adversaries may opt to manually share keys and implement symmetric cryptographic algorithms (ex: RC4, AES) vice using mechanisms that are baked into a protocol. This may result in multiple layers of encryption (in protocols that are natively encrypted such as HTTPS) or encryption in protocols that not typically encrypted (such as HTTP or FTP)." }, { "ControlTitle":"MITRE ATT&CK T1137.001", @@ -13033,7 +13033,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1055.004", - "ControlDescription":"Asynchronous Procedure Call : Adversaries may inject malicious code into processes via the asynchronous procedure call (APC) queue in order to evade process-based defenses as well as possibly elevate privileges. APC injection is a method of executing arbitrary code in the address space of a separate live process. APC injection is commonly performed by attaching malicious code to the APC Queue (Citation: Microsoft APC) of a process's thread. Queued APC functions are executed when the thread enters an alterable state.(Citation: Microsoft APC) A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point QueueUserAPC can be used to invoke a function (such as LoadLibrayA pointing to a malicious DLL). A variation of APC injection, dubbed 'Early Bird injection', involves creating a suspended process in which malicious code can be written and executed before the process' entry point (and potentially subsequent anti-malware hooks) via an APC. (Citation: CyberBit Early Bird Apr 2018) AtomBombing (Citation: ENSIL AtomBombing Oct 2016) is another variation that utilizes APCs to invoke malicious code previously written to the global atom table.(Citation: Microsoft Atom Table)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via APC injection may also evade detection from security products since the execution is masked under a legitimate process. " + "ControlDescription":"Asynchronous Procedure Call : Adversaries may inject malicious code into processes via the asynchronous procedure call (APC) queue in order to evade process-based defenses as well as possibly elevate privileges. APC injection is a method of executing arbitrary code in the address space of a separate live process. APC injection is commonly performed by attaching malicious code to the APC Queue (Citation: Microsoft APC) of a process's thread. Queued APC functions are executed when the thread enters an alterable state.(Citation: Microsoft APC) A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point QueueUserAPC can be used to invoke a function (such as LoadLibrayA pointing to a malicious DLL). 
 {
 "ControlTitle":"MITRE ATT&CK T1137.001",
@@ -13033,7 +13033,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1055.004",
- "ControlDescription":"Asynchronous Procedure Call : Adversaries may inject malicious code into processes via the asynchronous procedure call (APC) queue in order to evade process-based defenses as well as possibly elevate privileges. APC injection is a method of executing arbitrary code in the address space of a separate live process. APC injection is commonly performed by attaching malicious code to the APC Queue (Citation: Microsoft APC) of a process's thread. Queued APC functions are executed when the thread enters an alterable state.(Citation: Microsoft APC) A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point QueueUserAPC can be used to invoke a function (such as LoadLibrayA pointing to a malicious DLL). A variation of APC injection, dubbed 'Early Bird injection', involves creating a suspended process in which malicious code can be written and executed before the process' entry point (and potentially subsequent anti-malware hooks) via an APC. (Citation: CyberBit Early Bird Apr 2018) AtomBombing (Citation: ENSIL AtomBombing Oct 2016) is another variation that utilizes APCs to invoke malicious code previously written to the global atom table.(Citation: Microsoft Atom Table)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via APC injection may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Asynchronous Procedure Call : Adversaries may inject malicious code into processes via the asynchronous procedure call (APC) queue in order to evade process-based defenses as well as possibly elevate privileges. APC injection is a method of executing arbitrary code in the address space of a separate live process. APC injection is commonly performed by attaching malicious code to the APC Queue (Citation: Microsoft APC) of a process's thread. Queued APC functions are executed when the thread enters an alterable state.(Citation: Microsoft APC) A handle to an existing victim process is first created with native Windows API calls such as OpenThread. At this point QueueUserAPC can be used to invoke a function (such as LoadLibrayA pointing to a malicious DLL). A variation of APC injection, dubbed 'Early Bird injection', involves creating a suspended process in which malicious code can be written and executed before the process' entry point (and potentially subsequent anti-malware hooks) via an APC. (Citation: CyberBit Early Bird Apr 2018) AtomBombing (Citation: ENSIL AtomBombing Oct 2016) is another variation that utilizes APCs to invoke malicious code previously written to the global atom table.(Citation: Microsoft Atom Table)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via APC injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1020.001",
@@ -13049,7 +13049,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1546.009",
- "ControlDescription":"AppCert DLLs : Adversaries may establish persistence and/or elevate privileges by executing malicious content triggered by AppCert DLLs loaded into processes. Dynamic-link libraries (DLLs) that are specified in the AppCertDLLs Registry key under HKEY_LOCAL_MACHINE/System/CurrentControlSet/Control/Session Manager/ are loaded into every process that calls the ubiquitously used application programming interface (API) functions CreateProcess, CreateProcessAsUser, CreateProcessWithLoginW, CreateProcessWithTokenW, or WinExec. (Citation: Elastic Process Injection July 2017)Similar to [Process Injection](https://attack.mitre.org/techniques/T1055), this value can be abused to obtain elevated privileges by causing a malicious DLL to be loaded and run in the context of separate processes on the computer. Malicious AppCert DLLs may also provide persistence by continuously being triggered by API activity. "
+ "ControlDescription":"AppCert DLLs : Adversaries may establish persistence and/or elevate privileges by executing malicious content triggered by AppCert DLLs loaded into processes. Dynamic-link libraries (DLLs) that are specified in the AppCertDLLs Registry key under HKEY_LOCAL_MACHINE/System/CurrentControlSet/Control/Session Manager/ are loaded into every process that calls the ubiquitously used application programming interface (API) functions CreateProcess, CreateProcessAsUser, CreateProcessWithLoginW, CreateProcessWithTokenW, or WinExec. (Citation: Elastic Process Injection July 2017)Similar to [Process Injection](https://attack.mitre.org/techniques/T1055), this value can be abused to obtain elevated privileges by causing a malicious DLL to be loaded and run in the context of separate processes on the computer. Malicious AppCert DLLs may also provide persistence by continuously being triggered by API activity."
 },
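Reviewer aid: the AppCert location T1546.009 names is empty on most clean hosts, which makes it a cheap persistence check. A hedged, read-only Windows sketch of our own (the key is commonly documented as Session Manager\AppCertDlls; confirm the path on your build):

```python
import winreg

# Report any DLLs registered under the AppCertDlls key; absence is the
# expected state on a typical workstation. Read-only; not ElectricEye code.
KEY = r"System\CurrentControlSet\Control\Session Manager\AppCertDlls"

def check_appcert_dlls():
    try:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, KEY)
    except OSError:
        print("[+] AppCertDlls key not present (expected on a clean host)")
        return
    with key:
        index = 0
        while True:
            try:
                name, value, _vtype = winreg.EnumValue(key, index)
            except OSError:
                break  # no more values
            print(f"[!] AppCertDlls entry {name}: {value}")
            index += 1

if __name__ == "__main__":
    check_appcert_dlls()
```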
 {
 "ControlTitle":"MITRE ATT&CK T1191",
@@ -13061,7 +13061,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1114.003",
- "ControlDescription":"Email Forwarding Rule : Adversaries may setup email forwarding rules to collect sensitive information. Adversaries may abuse email forwarding rules to monitor the activities of a victim, steal information, and further gain intelligence on the victim or the victim's organization to use as part of further exploits or operations.(Citation: US-CERT TA18-068A 2018) Furthermore, email forwarding rules can allow adversaries to maintain persistent access to victim's emails even after compromised credentials are reset by administrators.(Citation: Pfammatter - Hidden Inbox Rules) Most email clients allow users to create inbox rules for various email functions, including forwarding to a different recipient. These rules may be created through a local email application, a web interface, or by command-line interface. Messages can be forwarded to internal or external recipients, and there are no restrictions limiting the extent of this rule. Administrators may also create forwarding rules for user accounts with the same considerations and outcomes.(Citation: Microsoft Tim McMichael Exchange Mail Forwarding 2)(Citation: Mac Forwarding Rules)Any user or administrator within the organization (or adversary with valid credentials) can create rules to automatically forward all received messages to another recipient, forward emails to different locations based on the sender, and more. Adversaries may also hide the rule by making use of the Microsoft Messaging API (MAPI) to modify the rule properties, making it hidden and not visible from Outlook, OWA or most Exchange Administration tools.(Citation: Pfammatter - Hidden Inbox Rules)In some environments, administrators may be able to enable email forwarding rules that operate organization-wide rather than on individual inboxes. For example, Microsoft Exchange supports transport rules that evaluate all mail an organization receives against user-specified conditions, then performs a user-specified action on mail that adheres to those conditions.(Citation: Microsoft Mail Flow Rules 2023) Adversaries that abuse such features may be able to enable forwarding on all or specific mail an organization receives. "
+ "ControlDescription":"Email Forwarding Rule : Adversaries may setup email forwarding rules to collect sensitive information. Adversaries may abuse email forwarding rules to monitor the activities of a victim, steal information, and further gain intelligence on the victim or the victim's organization to use as part of further exploits or operations.(Citation: US-CERT TA18-068A 2018) Furthermore, email forwarding rules can allow adversaries to maintain persistent access to victim's emails even after compromised credentials are reset by administrators.(Citation: Pfammatter - Hidden Inbox Rules) Most email clients allow users to create inbox rules for various email functions, including forwarding to a different recipient. These rules may be created through a local email application, a web interface, or by command-line interface. Messages can be forwarded to internal or external recipients, and there are no restrictions limiting the extent of this rule. Administrators may also create forwarding rules for user accounts with the same considerations and outcomes.(Citation: Microsoft Tim McMichael Exchange Mail Forwarding 2)(Citation: Mac Forwarding Rules)Any user or administrator within the organization (or adversary with valid credentials) can create rules to automatically forward all received messages to another recipient, forward emails to different locations based on the sender, and more. Adversaries may also hide the rule by making use of the Microsoft Messaging API (MAPI) to modify the rule properties, making it hidden and not visible from Outlook, OWA or most Exchange Administration tools.(Citation: Pfammatter - Hidden Inbox Rules)In some environments, administrators may be able to enable email forwarding rules that operate organization-wide rather than on individual inboxes. For example, Microsoft Exchange supports transport rules that evaluate all mail an organization receives against user-specified conditions, then performs a user-specified action on mail that adheres to those conditions.(Citation: Microsoft Mail Flow Rules 2023) Adversaries that abuse such features may be able to enable forwarding on all or specific mail an organization receives."
 },
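Reviewer aid: inbox rules like those T1114.003 describes can be reviewed through Microsoft Graph. A hedged sketch of our own, assuming the requests package and an already-acquired delegated token (TOKEN is a placeholder; field names follow the documented messageRules schema, but verify against current Graph docs):

```python
import requests  # third-party; assumed available

# List inbox rules that forward or redirect mail. Read-only audit sketch;
# not ElectricEye code.
GRAPH = "https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messageRules"

def forwarding_rules(token: str) -> list:
    resp = requests.get(GRAPH, headers={"Authorization": f"Bearer {token}"})
    resp.raise_for_status()
    suspicious = []
    for rule in resp.json().get("value", []):
        actions = rule.get("actions") or {}
        if actions.get("forwardTo") or actions.get("redirectTo"):
            suspicious.append(rule)
    return suspicious
```

Note that, as the description says, MAPI-hidden rules may not appear in client-facing listings, so an empty result here is necessary but not sufficient.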
 {
 "ControlTitle":"MITRE ATT&CK T1074",
@@ -13101,7 +13101,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1612",
- "ControlDescription":"Build Image on Host : Adversaries may build a container image directly on a host to bypass defenses that monitor for the retrieval of malicious images from a public registry. A remote build request may be sent to the Docker API that includes a Dockerfile that pulls a vanilla base image, such as alpine, from a public or local registry and then builds a custom image upon it.(Citation: Docker Build Image)An adversary may take advantage of that build API to build a custom image on the host that includes malware downloaded from their C2 server, and then they may utilize [Deploy Container](https://attack.mitre.org/techniques/T1610) using that custom image.(Citation: Aqua Build Images on Hosts)(Citation: Aqua Security Cloud Native Threat Report June 2021) If the base image is pulled from a public registry, defenses will likely not detect the image as malicious since it's a vanilla image. If the base image already resides in a local registry, the pull may be considered even less suspicious since the image is already in the environment. "
+ "ControlDescription":"Build Image on Host : Adversaries may build a container image directly on a host to bypass defenses that monitor for the retrieval of malicious images from a public registry. A remote build request may be sent to the Docker API that includes a Dockerfile that pulls a vanilla base image, such as alpine, from a public or local registry and then builds a custom image upon it.(Citation: Docker Build Image)An adversary may take advantage of that build API to build a custom image on the host that includes malware downloaded from their C2 server, and then they may utilize [Deploy Container](https://attack.mitre.org/techniques/T1610) using that custom image.(Citation: Aqua Build Images on Hosts)(Citation: Aqua Security Cloud Native Threat Report June 2021) If the base image is pulled from a public registry, defenses will likely not detect the image as malicious since it's a vanilla image. If the base image already resides in a local registry, the pull may be considered even less suspicious since the image is already in the environment."
 },
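Reviewer aid: the on-host build activity T1612 describes surfaces in the Docker daemon's event stream. A defensive sketch of our own, assuming the docker SDK for Python is installed (event Action names vary by daemon version, so this prints them rather than asserting a fixed list):

```python
import docker  # the docker SDK for Python; assumed installed

# Watch the local daemon's event stream and print image-related activity
# so local builds, pulls, and tags can be baselined. Not ElectricEye code.
def watch_image_events():
    client = docker.from_env()
    for event in client.events(decode=True):
        if event.get("Type") == "image":
            action = event.get("Action")
            ref = event.get("Actor", {}).get("ID", "?")
            print(f"[image] {action}: {ref}")

if __name__ == "__main__":
    watch_image_events()
```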
 {
 "ControlTitle":"MITRE ATT&CK T1051",
@@ -13109,11 +13109,11 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1055.002",
- "ControlDescription":"Portable Executable Injection : Adversaries may inject portable executables (PE) into processes in order to evade process-based defenses as well as possibly elevate privileges. PE injection is a method of executing arbitrary code in the address space of a separate live process. PE injection is commonly performed by copying code (perhaps without a file on disk) into the virtual address space of the target process before invoking it via a new thread. The write can be performed with native Windows API calls such as VirtualAllocEx and WriteProcessMemory, then invoked with CreateRemoteThread or additional code (ex: shellcode). The displacement of the injected code does introduce the additional requirement for functionality to remap memory references. (Citation: Elastic Process Injection July 2017) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via PE injection may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Portable Executable Injection : Adversaries may inject portable executables (PE) into processes in order to evade process-based defenses as well as possibly elevate privileges. PE injection is a method of executing arbitrary code in the address space of a separate live process. PE injection is commonly performed by copying code (perhaps without a file on disk) into the virtual address space of the target process before invoking it via a new thread. The write can be performed with native Windows API calls such as VirtualAllocEx and WriteProcessMemory, then invoked with CreateRemoteThread or additional code (ex: shellcode). The displacement of the injected code does introduce the additional requirement for functionality to remap memory references. (Citation: Elastic Process Injection July 2017) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via PE injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1218.012",
- "ControlDescription":"Verclsid : Adversaries may abuse verclsid.exe to proxy execution of malicious code. Verclsid.exe is known as the Extension CLSID Verification Host and is responsible for verifying each shell extension before they are used by Windows Explorer or the Windows Shell.(Citation: WinOSBite verclsid.exe)Adversaries may abuse verclsid.exe to execute malicious payloads. This may be achieved by running verclsid.exe /S /C {CLSID}, where the file is referenced by a Class ID (CLSID), a unique identification number used to identify COM objects. COM payloads executed by verclsid.exe may be able to perform various malicious actions, such as loading and executing COM scriptlets (SCT) from remote servers (similar to [Regsvr32](https://attack.mitre.org/techniques/T1218/010)). Since the binary may be signed and/or native on Windows systems, proxying execution via verclsid.exe may bypass application control solutions that do not account for its potential abuse.(Citation: LOLBAS Verclsid)(Citation: Red Canary Verclsid.exe)(Citation: BOHOPS Abusing the COM Registry)(Citation: Nick Tyrer GitHub) "
+ "ControlDescription":"Verclsid : Adversaries may abuse verclsid.exe to proxy execution of malicious code. Verclsid.exe is known as the Extension CLSID Verification Host and is responsible for verifying each shell extension before they are used by Windows Explorer or the Windows Shell.(Citation: WinOSBite verclsid.exe)Adversaries may abuse verclsid.exe to execute malicious payloads. This may be achieved by running verclsid.exe /S /C {CLSID}, where the file is referenced by a Class ID (CLSID), a unique identification number used to identify COM objects. COM payloads executed by verclsid.exe may be able to perform various malicious actions, such as loading and executing COM scriptlets (SCT) from remote servers (similar to [Regsvr32](https://attack.mitre.org/techniques/T1218/010)). Since the binary may be signed and/or native on Windows systems, proxying execution via verclsid.exe may bypass application control solutions that do not account for its potential abuse.(Citation: LOLBAS Verclsid)(Citation: Red Canary Verclsid.exe)(Citation: BOHOPS Abusing the COM Registry)(Citation: Nick Tyrer GitHub)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1586",
@@ -13189,7 +13189,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1619",
- "ControlDescription":"Cloud Storage Object Discovery : Adversaries may enumerate objects in cloud storage infrastructure. Adversaries may use this information during automated discovery to shape follow-on behaviors, including requesting all or specific objects from cloud storage. Similar to [File and Directory Discovery](https://attack.mitre.org/techniques/T1083) on a local host, after identifying available storage services (i.e. [Cloud Infrastructure Discovery](https://attack.mitre.org/techniques/T1580)) adversaries may access the contents/objects stored in cloud infrastructure.Cloud service providers offer APIs allowing users to enumerate objects stored within cloud storage. Examples include ListObjectsV2 in AWS (Citation: ListObjectsV2) and List Blobs in Azure(Citation: List Blobs) "
+ "ControlDescription":"Cloud Storage Object Discovery : Adversaries may enumerate objects in cloud storage infrastructure. Adversaries may use this information during automated discovery to shape follow-on behaviors, including requesting all or specific objects from cloud storage. Similar to [File and Directory Discovery](https://attack.mitre.org/techniques/T1083) on a local host, after identifying available storage services (i.e. [Cloud Infrastructure Discovery](https://attack.mitre.org/techniques/T1580)) adversaries may access the contents/objects stored in cloud infrastructure.Cloud service providers offer APIs allowing users to enumerate objects stored within cloud storage. Examples include ListObjectsV2 in AWS (Citation: ListObjectsV2) and List Blobs in Azure(Citation: List Blobs)"
 },
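Reviewer aid: T1619 names ListObjectsV2 explicitly, and the legitimate call is worth seeing because CloudTrail S3 data events are what record it. A benign sketch using boto3 (which the AWS auditors in this repo already rely on); the bucket name is a placeholder:

```python
import boto3  # AWS SDK for Python

# Page through a bucket with ListObjectsV2 and collect object keys.
# Benign enumeration of your own bucket; not ElectricEye code.
def list_bucket_objects(bucket: str) -> list:
    s3 = boto3.client("s3")
    keys = []
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket):
        keys.extend(obj["Key"] for obj in page.get("Contents", []))
    return keys

if __name__ == "__main__":
    print(len(list_bucket_objects("example-bucket")), "objects")
```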
 {
 "ControlTitle":"MITRE ATT&CK T1606.001",
@@ -13205,11 +13205,11 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1567.001",
- "ControlDescription":"Exfiltration to Code Repository : Adversaries may exfiltrate data to a code repository rather than over their primary command and control channel. Code repositories are often accessible via an API (ex: https://api.github.com). Access to these APIs are often over HTTPS, which gives the adversary an additional level of protection.Exfiltration to a code repository can also provide a significant amount of cover to the adversary if it is a popular service already used by hosts within the network. "
+ "ControlDescription":"Exfiltration to Code Repository : Adversaries may exfiltrate data to a code repository rather than over their primary command and control channel. Code repositories are often accessible via an API (ex: https://api.github.com). Access to these APIs are often over HTTPS, which gives the adversary an additional level of protection.Exfiltration to a code repository can also provide a significant amount of cover to the adversary if it is a popular service already used by hosts within the network."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1021.007",
- "ControlDescription":"Cloud Services : Adversaries may log into accessible cloud services within a compromised environment using [Valid Accounts](https://attack.mitre.org/techniques/T1078) that are synchronized with or federated to on-premises user identities. The adversary may then perform management actions or access cloud-hosted resources as the logged-on user. Many enterprises federate centrally managed user identities to cloud services, allowing users to login with their domain credentials in order to access the cloud control plane. Similarly, adversaries may connect to available cloud services through the web console or through the cloud command line interface (CLI) (e.g., [Cloud API](https://attack.mitre.org/techniques/T1059/009)), using commands such as Connect-AZAccount for Azure PowerShell, Connect-MgGraph for Microsoft Graph PowerShell, and gcloud auth login for the Google Cloud CLI.In some cases, adversaries may be able to authenticate to these services via [Application Access Token](https://attack.mitre.org/techniques/T1550/001) instead of a username and password. "
+ "ControlDescription":"Cloud Services : Adversaries may log into accessible cloud services within a compromised environment using [Valid Accounts](https://attack.mitre.org/techniques/T1078) that are synchronized with or federated to on-premises user identities. The adversary may then perform management actions or access cloud-hosted resources as the logged-on user. Many enterprises federate centrally managed user identities to cloud services, allowing users to login with their domain credentials in order to access the cloud control plane. Similarly, adversaries may connect to available cloud services through the web console or through the cloud command line interface (CLI) (e.g., [Cloud API](https://attack.mitre.org/techniques/T1059/009)), using commands such as Connect-AZAccount for Azure PowerShell, Connect-MgGraph for Microsoft Graph PowerShell, and gcloud auth login for the Google Cloud CLI.In some cases, adversaries may be able to authenticate to these services via [Application Access Token](https://attack.mitre.org/techniques/T1550/001) instead of a username and password."
 },
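Reviewer aid: on AWS, the control plane logins T1021.007 describes land in CloudTrail as ConsoleLogin events, so a periodic review is straightforward. A defensive sketch of our own using boto3:

```python
from datetime import datetime, timedelta, timezone

import boto3  # AWS SDK for Python

# Pull recent ConsoleLogin events from CloudTrail to review who has been
# signing in to the cloud control plane. Read-only; not ElectricEye code.
def recent_console_logins(hours: int = 24):
    cloudtrail = boto3.client("cloudtrail")
    start = datetime.now(timezone.utc) - timedelta(hours=hours)
    paginator = cloudtrail.get_paginator("lookup_events")
    pages = paginator.paginate(
        LookupAttributes=[
            {"AttributeKey": "EventName", "AttributeValue": "ConsoleLogin"}
        ],
        StartTime=start,
    )
    for page in pages:
        for event in page["Events"]:
            print(event["EventTime"], event.get("Username", "<unknown>"))

if __name__ == "__main__":
    recent_console_logins()
```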
 {
 "ControlTitle":"MITRE ATT&CK T1205.001",
@@ -13225,7 +13225,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1528",
- "ControlDescription":"Steal Application Access Token : Adversaries can steal application access tokens as a means of acquiring credentials to access remote systems and resources.Application access tokens are used to make authorized API requests on behalf of a user or service and are commonly used as a way to access resources in cloud and container-based applications and software-as-a-service (SaaS).(Citation: Auth0 - Why You Should Always Use Access Tokens to Secure APIs Sept 2019) OAuth is one commonly implemented framework that issues tokens to users for access to systems. Adversaries who steal account API tokens in cloud and containerized environments may be able to access data and perform actions with the permissions of these accounts, which can lead to privilege escalation and further compromise of the environment.In Kubernetes environments, processes running inside a container communicate with the Kubernetes API server using service account tokens. If a container is compromised, an attacker may be able to steal the container's token and thereby gain access to Kubernetes API commands.(Citation: Kubernetes Service Accounts)Token theft can also occur through social engineering, in which case user action may be required to grant access. An application desiring access to cloud-based services or protected APIs can gain entry using OAuth 2.0 through a variety of authorization protocols. An example commonly-used sequence is Microsoft's Authorization Code Grant flow.(Citation: Microsoft Identity Platform Protocols May 2019)(Citation: Microsoft - OAuth Code Authorization flow - June 2019) An OAuth access token enables a third-party application to interact with resources containing user data in the ways requested by the application without obtaining user credentials. \n \nAdversaries can leverage OAuth authorization by constructing a malicious application designed to be granted access to resources with the target user's OAuth token.(Citation: Amnesty OAuth Phishing Attacks, August 2019)(Citation: Trend Micro Pawn Storm OAuth 2017) The adversary will need to complete registration of their application with the authorization server, for example Microsoft Identity Platform using Azure Portal, the Visual Studio IDE, the command-line interface, PowerShell, or REST API calls.(Citation: Microsoft - Azure AD App Registration - May 2019) Then, they can send a [Spearphishing Link](https://attack.mitre.org/techniques/T1566/002) to the target user to entice them to grant access to the application. Once the OAuth access token is granted, the application can gain potentially long-term access to features of the user account through [Application Access Token](https://attack.mitre.org/techniques/T1550/001).(Citation: Microsoft - Azure AD Identity Tokens - Aug 2019)Application access tokens may function within a limited lifetime, limiting how long an adversary can utilize the stolen token. However, in some cases, adversaries can also steal application refresh tokens(Citation: Auth0 Understanding Refresh Tokens), allowing them to obtain new access tokens without prompting the user. "
+ "ControlDescription":"Steal Application Access Token : Adversaries can steal application access tokens as a means of acquiring credentials to access remote systems and resources.Application access tokens are used to make authorized API requests on behalf of a user or service and are commonly used as a way to access resources in cloud and container-based applications and software-as-a-service (SaaS).(Citation: Auth0 - Why You Should Always Use Access Tokens to Secure APIs Sept 2019) OAuth is one commonly implemented framework that issues tokens to users for access to systems. Adversaries who steal account API tokens in cloud and containerized environments may be able to access data and perform actions with the permissions of these accounts, which can lead to privilege escalation and further compromise of the environment.In Kubernetes environments, processes running inside a container communicate with the Kubernetes API server using service account tokens. If a container is compromised, an attacker may be able to steal the container's token and thereby gain access to Kubernetes API commands.(Citation: Kubernetes Service Accounts)Token theft can also occur through social engineering, in which case user action may be required to grant access. An application desiring access to cloud-based services or protected APIs can gain entry using OAuth 2.0 through a variety of authorization protocols. An example commonly-used sequence is Microsoft's Authorization Code Grant flow.(Citation: Microsoft Identity Platform Protocols May 2019)(Citation: Microsoft - OAuth Code Authorization flow - June 2019) An OAuth access token enables a third-party application to interact with resources containing user data in the ways requested by the application without obtaining user credentials. \n \nAdversaries can leverage OAuth authorization by constructing a malicious application designed to be granted access to resources with the target user's OAuth token.(Citation: Amnesty OAuth Phishing Attacks, August 2019)(Citation: Trend Micro Pawn Storm OAuth 2017) The adversary will need to complete registration of their application with the authorization server, for example Microsoft Identity Platform using Azure Portal, the Visual Studio IDE, the command-line interface, PowerShell, or REST API calls.(Citation: Microsoft - Azure AD App Registration - May 2019) Then, they can send a [Spearphishing Link](https://attack.mitre.org/techniques/T1566/002) to the target user to entice them to grant access to the application. Once the OAuth access token is granted, the application can gain potentially long-term access to features of the user account through [Application Access Token](https://attack.mitre.org/techniques/T1550/001).(Citation: Microsoft - Azure AD Identity Tokens - Aug 2019)Application access tokens may function within a limited lifetime, limiting how long an adversary can utilize the stolen token. However, in some cases, adversaries can also steal application refresh tokens(Citation: Auth0 Understanding Refresh Tokens), allowing them to obtain new access tokens without prompting the user. "
 },
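Reviewer aid: the Kubernetes exposure T1528 mentions hinges on whether a service account token is mounted into the container at all. A minimal in-container check of our own (the path below is the well-known default mount point; workloads that never call the API server should set automountServiceAccountToken: false so it is absent):

```python
import os

# Report whether a Kubernetes service account token is mounted at the
# default path. Read-only; not ElectricEye code.
TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token"

if os.path.isfile(TOKEN_PATH):
    print("[!] service account token is mounted; stealable if this container is compromised")
else:
    print("[+] no service account token mounted")
```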
 {
 "ControlTitle":"MITRE ATT&CK T1598.002",
@@ -13261,7 +13261,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1048.002",
- "ControlDescription":"Exfiltration Over Asymmetric Encrypted Non-C2 Protocol : Adversaries may steal data by exfiltrating it over an asymmetrically encrypted network protocol other than that of the existing command and control channel. The data may also be sent to an alternate network location from the main command and control server. Asymmetric encryption algorithms are those that use different keys on each end of the channel. Also known as public-key cryptography, this requires pairs of cryptographic keys that can encrypt/decrypt data from the corresponding key. Each end of the communication channels requires a private key (only in the procession of that entity) and the public key of the other entity. The public keys of each entity are exchanged before encrypted communications begin. Network protocols that use asymmetric encryption (such as HTTPS/TLS/SSL) often utilize symmetric encryption once keys are exchanged. Adversaries may opt to use these encrypted mechanisms that are baked into a protocol. "
+ "ControlDescription":"Exfiltration Over Asymmetric Encrypted Non-C2 Protocol : Adversaries may steal data by exfiltrating it over an asymmetrically encrypted network protocol other than that of the existing command and control channel. The data may also be sent to an alternate network location from the main command and control server. Asymmetric encryption algorithms are those that use different keys on each end of the channel. Also known as public-key cryptography, this requires pairs of cryptographic keys that can encrypt/decrypt data from the corresponding key. Each end of the communication channels requires a private key (only in the procession of that entity) and the public key of the other entity. The public keys of each entity are exchanged before encrypted communications begin. Network protocols that use asymmetric encryption (such as HTTPS/TLS/SSL) often utilize symmetric encryption once keys are exchanged. Adversaries may opt to use these encrypted mechanisms that are baked into a protocol."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1087.004",
@@ -13293,7 +13293,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1497.002",
- "ControlDescription":"User Activity Based Checks : Adversaries may employ various user activity checks to detect and avoid virtualization and analysis environments. This may include changing behaviors based on the results of checks for the presence of artifacts indicative of a virtual machine environment (VME) or sandbox. If the adversary detects a VME, they may alter their malware to disengage from the victim or conceal the core functions of the implant. They may also search for VME artifacts before dropping secondary or additional payloads. Adversaries may use the information learned from [Virtualization/Sandbox Evasion](https://attack.mitre.org/techniques/T1497) during automated discovery to shape follow-on behaviors.(Citation: Deloitte Environment Awareness)Adversaries may search for user activity on the host based on variables such as the speed/frequency of mouse movements and clicks (Citation: Sans Virtual Jan 2016) , browser history, cache, bookmarks, or number of files in common directories such as home or the desktop. Other methods may rely on specific user interaction with the system before the malicious code is activated, such as waiting for a document to close before activating a macro (Citation: Unit 42 Sofacy Nov 2018) or waiting for a user to double click on an embedded image to activate.(Citation: FireEye FIN7 April 2017) "
+ "ControlDescription":"User Activity Based Checks : Adversaries may employ various user activity checks to detect and avoid virtualization and analysis environments. This may include changing behaviors based on the results of checks for the presence of artifacts indicative of a virtual machine environment (VME) or sandbox. If the adversary detects a VME, they may alter their malware to disengage from the victim or conceal the core functions of the implant. They may also search for VME artifacts before dropping secondary or additional payloads. Adversaries may use the information learned from [Virtualization/Sandbox Evasion](https://attack.mitre.org/techniques/T1497) during automated discovery to shape follow-on behaviors.(Citation: Deloitte Environment Awareness)Adversaries may search for user activity on the host based on variables such as the speed/frequency of mouse movements and clicks (Citation: Sans Virtual Jan 2016) , browser history, cache, bookmarks, or number of files in common directories such as home or the desktop. Other methods may rely on specific user interaction with the system before the malicious code is activated, such as waiting for a document to close before activating a macro (Citation: Unit 42 Sofacy Nov 2018) or waiting for a user to double click on an embedded image to activate.(Citation: FireEye FIN7 April 2017)"
 },
 {
 "ControlTitle":"MITRE ATT&CK T1141",
@@ -13301,7 +13301,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1585.003",
- "ControlDescription":"Cloud Accounts : Adversaries may create accounts with cloud providers that can be used during targeting. Adversaries can use cloud accounts to further their operations, including leveraging cloud storage services such as Dropbox, MEGA, Microsoft OneDrive, or AWS S3 buckets for [Exfiltration to Cloud Storage](https://attack.mitre.org/techniques/T1567/002) or to [Upload Tool](https://attack.mitre.org/techniques/T1608/002)s. Cloud accounts can also be used in the acquisition of infrastructure, such as [Virtual Private Server](https://attack.mitre.org/techniques/T1583/003)s or [Serverless](https://attack.mitre.org/techniques/T1583/007) infrastructure. Establishing cloud accounts may allow adversaries to develop sophisticated capabilities without managing their own servers.(Citation: Awake Security C2 Cloud)Creating [Cloud Accounts](https://attack.mitre.org/techniques/T1585/003) may also require adversaries to establish [Email Accounts](https://attack.mitre.org/techniques/T1585/002) to register with the cloud provider. "
+ "ControlDescription":"Cloud Accounts : Adversaries may create accounts with cloud providers that can be used during targeting. Adversaries can use cloud accounts to further their operations, including leveraging cloud storage services such as Dropbox, MEGA, Microsoft OneDrive, or AWS S3 buckets for [Exfiltration to Cloud Storage](https://attack.mitre.org/techniques/T1567/002) or to [Upload Tool](https://attack.mitre.org/techniques/T1608/002)s. Cloud accounts can also be used in the acquisition of infrastructure, such as [Virtual Private Server](https://attack.mitre.org/techniques/T1583/003)s or [Serverless](https://attack.mitre.org/techniques/T1583/007) infrastructure. Establishing cloud accounts may allow adversaries to develop sophisticated capabilities without managing their own servers.(Citation: Awake Security C2 Cloud)Creating [Cloud Accounts](https://attack.mitre.org/techniques/T1585/003) may also require adversaries to establish [Email Accounts](https://attack.mitre.org/techniques/T1585/002) to register with the cloud provider."
 },
 {
 "ControlTitle":"MITRE ATT&CK T1072",
@@ -13325,7 +13325,7 @@
 },
 {
 "ControlTitle":"MITRE ATT&CK T1606",
- "ControlDescription":"Forge Web Credentials : Adversaries may forge credential materials that can be used to gain access to web applications or Internet services. Web applications and services (hosted in cloud SaaS environments or on-premise servers) often use session cookies, tokens, or other materials to authenticate and authorize user access.Adversaries may generate these credential materials in order to gain access to web resources. This differs from [Steal Web Session Cookie](https://attack.mitre.org/techniques/T1539), [Steal Application Access Token](https://attack.mitre.org/techniques/T1528), and other similar behaviors in that the credentials are new and forged by the adversary, rather than stolen or intercepted from legitimate users.The generation of web credentials often requires secret values, such as passwords, [Private Keys](https://attack.mitre.org/techniques/T1552/004), or other cryptographic seed values.(Citation: GitHub AWS-ADFS-Credential-Generator) Adversaries may also forge tokens by taking advantage of features such as the `AssumeRole` and `GetFederationToken` APIs in AWS, which allow users to request temporary security credentials (i.e., [Temporary Elevated Cloud Access](https://attack.mitre.org/techniques/T1548/005)), or the `zmprov gdpak` command in Zimbra, which generates a pre-authentication key that can be used to generate tokens for any user in the domain.(Citation: AWS Temporary Security Credentials)(Citation: Zimbra Preauth)Once forged, adversaries may use these web credentials to access resources (ex: [Use Alternate Authentication Material](https://attack.mitre.org/techniques/T1550)), which may bypass multi-factor and other authentication protection mechanisms.(Citation: Pass The Cookie)(Citation: Unit 42 Mac Crypto Cookies January 2019)(Citation: Microsoft SolarWinds Customer Guidance) "
+ "ControlDescription":"Forge Web Credentials : Adversaries may forge credential materials that can be used to gain access to web applications or Internet services. Web applications and services (hosted in cloud SaaS environments or on-premise servers) often use session cookies, tokens, or other materials to authenticate and authorize user access.Adversaries may generate these credential materials in order to gain access to web resources. This differs from [Steal Web Session Cookie](https://attack.mitre.org/techniques/T1539), [Steal Application Access Token](https://attack.mitre.org/techniques/T1528), and other similar behaviors in that the credentials are new and forged by the adversary, rather than stolen or intercepted from legitimate users.The generation of web credentials often requires secret values, such as passwords, [Private Keys](https://attack.mitre.org/techniques/T1552/004), or other cryptographic seed values.(Citation: GitHub AWS-ADFS-Credential-Generator) Adversaries may also forge tokens by taking advantage of features such as the `AssumeRole` and `GetFederationToken` APIs in AWS, which allow users to request temporary security credentials (i.e., [Temporary Elevated Cloud Access](https://attack.mitre.org/techniques/T1548/005)), or the `zmprov gdpak` command in Zimbra, which generates a pre-authentication key that can be used to generate tokens for any user in the domain.(Citation: AWS Temporary Security Credentials)(Citation: Zimbra Preauth)Once forged, adversaries may use these web credentials to access resources (ex: [Use Alternate Authentication Material](https://attack.mitre.org/techniques/T1550)), which may bypass multi-factor and other authentication protection mechanisms.(Citation: Pass The Cookie)(Citation: Unit 42 Mac Crypto Cookies January 2019)(Citation: Microsoft SolarWinds Customer Guidance) "
 },
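Reviewer aid: T1606 names GetFederationToken, a legitimate STS API whose calls defenders should baseline in CloudTrail. A benign sketch of our own showing what the normal call looks like (name, policy, and duration are illustrative placeholders; 900 seconds is the documented minimum lifetime):

```python
import json

import boto3  # AWS SDK for Python

# Request short-lived federated credentials the legitimate way, so the
# corresponding CloudTrail entry can be recognized. Not ElectricEye code.
def issue_federation_token():
    sts = boto3.client("sts")
    response = sts.get_federation_token(
        Name="audit-demo",
        Policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}
            ],
        }),
        DurationSeconds=900,  # minimum duration keeps the demo credentials short-lived
    )
    return response["Credentials"]
```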
However, an adversary may hijack the syscall interface code stubs mapped into a process from the vdso shared object to execute syscalls to open and map a malicious shared object. This code can then be invoked by redirecting the execution flow of the process via patched memory address references stored in a process' global offset table (which store absolute addresses of mapped library functions).(Citation: ELF Injection May 2009)(Citation: Backtrace VDSO)(Citation: VDSO Aug 2005)(Citation: Syscall 2014)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via VDSO hijacking may also evade detection from security products since the execution is masked under a legitimate process. " }, { "ControlTitle":"MITRE ATT&CK T1026", @@ -13361,7 +13361,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1071.002", - "ControlDescription":"File Transfer Protocols : Adversaries may communicate using application layer protocols associated with transferring files to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as SMB, FTP, FTPS, and TFTP that transfer files may be very common in environments. Packets produced from these protocols may have many fields and headers in which data can be concealed. Data could also be concealed within the transferred files. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic. " + "ControlDescription":"File Transfer Protocols : Adversaries may communicate using application layer protocols associated with transferring files to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as SMB, FTP, FTPS, and TFTP that transfer files may be very common in environments. Packets produced from these protocols may have many fields and headers in which data can be concealed. Data could also be concealed within the transferred files. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic." }, { "ControlTitle":"MITRE ATT&CK T1122", @@ -13453,7 +13453,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1056.002", - "ControlDescription":"GUI Input Capture : Adversaries may mimic common operating system GUI components to prompt users for credentials with a seemingly legitimate prompt. 
When programs are executed that need additional privileges than are present in the current user context, it is common for the operating system to prompt the user for proper credentials to authorize the elevated privileges for the task (ex: [Bypass User Account Control](https://attack.mitre.org/techniques/T1548/002)).Adversaries may mimic this functionality to prompt users for credentials with a seemingly legitimate prompt for a number of reasons that mimic normal usage, such as a fake installer requiring additional access or a fake malware removal suite.(Citation: OSX Malware Exploits MacKeeper) This type of prompt can be used to collect credentials via various languages such as [AppleScript](https://attack.mitre.org/techniques/T1059/002)(Citation: LogRhythm Do You Trust Oct 2014)(Citation: OSX Keydnap malware)(Citation: Spoofing credential dialogs) and [PowerShell](https://attack.mitre.org/techniques/T1059/001).(Citation: LogRhythm Do You Trust Oct 2014)(Citation: Enigma Phishing for Credentials Jan 2015)(Citation: Spoofing credential dialogs) On Linux systems adversaries may launch dialog boxes prompting users for credentials from malicious shell scripts or the command line (i.e. [Unix Shell](https://attack.mitre.org/techniques/T1059/004)).(Citation: Spoofing credential dialogs) " + "ControlDescription":"GUI Input Capture : Adversaries may mimic common operating system GUI components to prompt users for credentials with a seemingly legitimate prompt. When programs are executed that need additional privileges than are present in the current user context, it is common for the operating system to prompt the user for proper credentials to authorize the elevated privileges for the task (ex: [Bypass User Account Control](https://attack.mitre.org/techniques/T1548/002)).Adversaries may mimic this functionality to prompt users for credentials with a seemingly legitimate prompt for a number of reasons that mimic normal usage, such as a fake installer requiring additional access or a fake malware removal suite.(Citation: OSX Malware Exploits MacKeeper) This type of prompt can be used to collect credentials via various languages such as [AppleScript](https://attack.mitre.org/techniques/T1059/002)(Citation: LogRhythm Do You Trust Oct 2014)(Citation: OSX Keydnap malware)(Citation: Spoofing credential dialogs) and [PowerShell](https://attack.mitre.org/techniques/T1059/001).(Citation: LogRhythm Do You Trust Oct 2014)(Citation: Enigma Phishing for Credentials Jan 2015)(Citation: Spoofing credential dialogs) On Linux systems adversaries may launch dialog boxes prompting users for credentials from malicious shell scripts or the command line (i.e. [Unix Shell](https://attack.mitre.org/techniques/T1059/004)).(Citation: Spoofing credential dialogs)" }, { "ControlTitle":"MITRE ATT&CK T1097", @@ -13533,7 +13533,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1001", - "ControlDescription":"Data Obfuscation : Adversaries may obfuscate command and control traffic to make it more difficult to detect. Command and control (C2) communications are hidden (but not necessarily encrypted) in an attempt to make the content more difficult to discover or decipher and to make the communication less conspicuous and hide commands from being seen. This encompasses many methods, such as adding junk data to protocol traffic, using steganography, or impersonating legitimate protocols. " + "ControlDescription":"Data Obfuscation : Adversaries may obfuscate command and control traffic to make it more difficult to detect. 
Command and control (C2) communications are hidden (but not necessarily encrypted) in an attempt to make the content more difficult to discover or decipher and to make the communication less conspicuous and hide commands from being seen. This encompasses many methods, such as adding junk data to protocol traffic, using steganography, or impersonating legitimate protocols." }, { "ControlTitle":"MITRE ATT&CK T1039", @@ -13577,7 +13577,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1055.012", - "ControlDescription":"Process Hollowing : Adversaries may inject malicious code into suspended and hollowed processes in order to evade process-based defenses. Process hollowing is a method of executing arbitrary code in the address space of a separate live process. Process hollowing is commonly performed by creating a process in a suspended state then unmapping/hollowing its memory, which can then be replaced with malicious code. A victim process can be created with native Windows API calls such as CreateProcess, which includes a flag to suspend the processes primary thread. At this point the process can be unmapped using APIs calls such as ZwUnmapViewOfSection or NtUnmapViewOfSection before being written to, realigned to the injected code, and resumed via VirtualAllocEx, WriteProcessMemory, SetThreadContext, then ResumeThread respectively.(Citation: Leitch Hollowing)(Citation: Elastic Process Injection July 2017)This is very similar to [Thread Local Storage](https://attack.mitre.org/techniques/T1055/005) but creates a new process rather than targeting an existing process. This behavior will likely not result in elevated privileges since the injected process was spawned from (and thus inherits the security context) of the injecting process. However, execution via process hollowing may also evade detection from security products since the execution is masked under a legitimate process. " + "ControlDescription":"Process Hollowing : Adversaries may inject malicious code into suspended and hollowed processes in order to evade process-based defenses. Process hollowing is a method of executing arbitrary code in the address space of a separate live process. Process hollowing is commonly performed by creating a process in a suspended state then unmapping/hollowing its memory, which can then be replaced with malicious code. A victim process can be created with native Windows API calls such as CreateProcess, which includes a flag to suspend the processes primary thread. At this point the process can be unmapped using APIs calls such as ZwUnmapViewOfSection or NtUnmapViewOfSection before being written to, realigned to the injected code, and resumed via VirtualAllocEx, WriteProcessMemory, SetThreadContext, then ResumeThread respectively.(Citation: Leitch Hollowing)(Citation: Elastic Process Injection July 2017)This is very similar to [Thread Local Storage](https://attack.mitre.org/techniques/T1055/005) but creates a new process rather than targeting an existing process. This behavior will likely not result in elevated privileges since the injected process was spawned from (and thus inherits the security context) of the injecting process. However, execution via process hollowing may also evade detection from security products since the execution is masked under a legitimate process." 
}, { "ControlTitle":"MITRE ATT&CK T1068", @@ -13589,7 +13589,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1531", - "ControlDescription":"Account Access Removal : Adversaries may interrupt availability of system and network resources by inhibiting access to accounts utilized by legitimate users. Accounts may be deleted, locked, or manipulated (ex: changed credentials) to remove access to accounts. Adversaries may also subsequently log off and/or perform a [System Shutdown/Reboot](https://attack.mitre.org/techniques/T1529) to set malicious changes into place.(Citation: CarbonBlack LockerGoga 2019)(Citation: Unit42 LockerGoga 2019)In Windows, [Net](https://attack.mitre.org/software/S0039) utility, Set-LocalUser and Set-ADAccountPassword [PowerShell](https://attack.mitre.org/techniques/T1059/001) cmdlets may be used by adversaries to modify user accounts. In Linux, the passwd utility may be used to change passwords. Accounts could also be disabled by Group Policy. Adversaries who use ransomware or similar attacks may first perform this and other Impact behaviors, such as [Data Destruction](https://attack.mitre.org/techniques/T1485) and [Defacement](https://attack.mitre.org/techniques/T1491), in order to impede incident response/recovery before completing the [Data Encrypted for Impact](https://attack.mitre.org/techniques/T1486) objective. " + "ControlDescription":"Account Access Removal : Adversaries may interrupt availability of system and network resources by inhibiting access to accounts utilized by legitimate users. Accounts may be deleted, locked, or manipulated (ex: changed credentials) to remove access to accounts. Adversaries may also subsequently log off and/or perform a [System Shutdown/Reboot](https://attack.mitre.org/techniques/T1529) to set malicious changes into place.(Citation: CarbonBlack LockerGoga 2019)(Citation: Unit42 LockerGoga 2019)In Windows, [Net](https://attack.mitre.org/software/S0039) utility, Set-LocalUser and Set-ADAccountPassword [PowerShell](https://attack.mitre.org/techniques/T1059/001) cmdlets may be used by adversaries to modify user accounts. In Linux, the passwd utility may be used to change passwords. Accounts could also be disabled by Group Policy. Adversaries who use ransomware or similar attacks may first perform this and other Impact behaviors, such as [Data Destruction](https://attack.mitre.org/techniques/T1485) and [Defacement](https://attack.mitre.org/techniques/T1491), in order to impede incident response/recovery before completing the [Data Encrypted for Impact](https://attack.mitre.org/techniques/T1486) objective." }, { "ControlTitle":"MITRE ATT&CK T1110.004", @@ -13601,11 +13601,11 @@ }, { "ControlTitle":"MITRE ATT&CK T1027", - "ControlDescription":"Obfuscated Files or Information : Adversaries may attempt to make an executable or file difficult to discover or analyze by encrypting, encoding, or otherwise obfuscating its contents on the system or in transit. This is common behavior that can be used across different platforms and the network to evade defenses. Payloads may be compressed, archived, or encrypted in order to avoid detection. These payloads may be used during Initial Access or later to mitigate detection. Sometimes a user's action may be required to open and [Deobfuscate/Decode Files or Information](https://attack.mitre.org/techniques/T1140) for [User Execution](https://attack.mitre.org/techniques/T1204). 
The user may also be required to input a password to open a password protected compressed/encrypted file that was provided by the adversary. (Citation: Volexity PowerDuke November 2016) Adversaries may also use compressed or archived scripts, such as JavaScript. Portions of files can also be encoded to hide the plain-text strings that would otherwise help defenders with discovery. (Citation: Linux/Cdorked.A We Live Security Analysis) Payloads may also be split into separate, seemingly benign files that only reveal malicious functionality when reassembled. (Citation: Carbon Black Obfuscation Sept 2016)Adversaries may also abuse [Command Obfuscation](https://attack.mitre.org/techniques/T1027/010) to obscure commands executed from payloads or directly via [Command and Scripting Interpreter](https://attack.mitre.org/techniques/T1059). Environment variables, aliases, characters, and other platform/language specific semantics can be used to evade signature based detections and application control mechanisms. (Citation: FireEye Obfuscation June 2017) (Citation: FireEye Revoke-Obfuscation July 2017)(Citation: PaloAlto EncodedCommand March 2017) " + "ControlDescription":"Obfuscated Files or Information : Adversaries may attempt to make an executable or file difficult to discover or analyze by encrypting, encoding, or otherwise obfuscating its contents on the system or in transit. This is common behavior that can be used across different platforms and the network to evade defenses. Payloads may be compressed, archived, or encrypted in order to avoid detection. These payloads may be used during Initial Access or later to mitigate detection. Sometimes a user's action may be required to open and [Deobfuscate/Decode Files or Information](https://attack.mitre.org/techniques/T1140) for [User Execution](https://attack.mitre.org/techniques/T1204). The user may also be required to input a password to open a password protected compressed/encrypted file that was provided by the adversary. (Citation: Volexity PowerDuke November 2016) Adversaries may also use compressed or archived scripts, such as JavaScript. Portions of files can also be encoded to hide the plain-text strings that would otherwise help defenders with discovery. (Citation: Linux/Cdorked.A We Live Security Analysis) Payloads may also be split into separate, seemingly benign files that only reveal malicious functionality when reassembled. (Citation: Carbon Black Obfuscation Sept 2016)Adversaries may also abuse [Command Obfuscation](https://attack.mitre.org/techniques/T1027/010) to obscure commands executed from payloads or directly via [Command and Scripting Interpreter](https://attack.mitre.org/techniques/T1059). Environment variables, aliases, characters, and other platform/language specific semantics can be used to evade signature based detections and application control mechanisms. 
(Citation: FireEye Obfuscation June 2017) (Citation: FireEye Revoke-Obfuscation July 2017)(Citation: PaloAlto EncodedCommand March 2017)" }, { "ControlTitle":"MITRE ATT&CK T1556.006", - "ControlDescription":"Multi-Factor Authentication : Adversaries may disable or modify multi-factor authentication (MFA) mechanisms to enable persistent access to compromised accounts.Once adversaries have gained access to a network by either compromising an account lacking MFA or by employing an MFA bypass method such as [Multi-Factor Authentication Request Generation](https://attack.mitre.org/techniques/T1621), adversaries may leverage their access to modify or completely disable MFA defenses. This can be accomplished by abusing legitimate features, such as excluding users from Azure AD Conditional Access Policies, registering a new yet vulnerable/adversary-controlled MFA method, or by manually patching MFA programs and configuration files to bypass expected functionality.(Citation: Mandiant APT42)(Citation: Azure AD Conditional Access Exclusions)For example, modifying the Windows hosts file (`C:/windows/system32/drivers/etc/hosts`) to redirect MFA calls to localhost instead of an MFA server may cause the MFA process to fail. If a 'fail open' policy is in place, any otherwise successful authentication attempt may be granted access without enforcing MFA. (Citation: Russians Exploit Default MFA Protocol - CISA March 2022) Depending on the scope, goals, and privileges of the adversary, MFA defenses may be disabled for individual accounts or for all accounts tied to a larger group, such as all domain accounts in a victim's network environment.(Citation: Russians Exploit Default MFA Protocol - CISA March 2022) " + "ControlDescription":"Multi-Factor Authentication : Adversaries may disable or modify multi-factor authentication (MFA) mechanisms to enable persistent access to compromised accounts.Once adversaries have gained access to a network by either compromising an account lacking MFA or by employing an MFA bypass method such as [Multi-Factor Authentication Request Generation](https://attack.mitre.org/techniques/T1621), adversaries may leverage their access to modify or completely disable MFA defenses. This can be accomplished by abusing legitimate features, such as excluding users from Azure AD Conditional Access Policies, registering a new yet vulnerable/adversary-controlled MFA method, or by manually patching MFA programs and configuration files to bypass expected functionality.(Citation: Mandiant APT42)(Citation: Azure AD Conditional Access Exclusions)For example, modifying the Windows hosts file (`C:/windows/system32/drivers/etc/hosts`) to redirect MFA calls to localhost instead of an MFA server may cause the MFA process to fail. If a 'fail open' policy is in place, any otherwise successful authentication attempt may be granted access without enforcing MFA. 
(Citation: Russians Exploit Default MFA Protocol - CISA March 2022) Depending on the scope, goals, and privileges of the adversary, MFA defenses may be disabled for individual accounts or for all accounts tied to a larger group, such as all domain accounts in a victim's network environment.(Citation: Russians Exploit Default MFA Protocol - CISA March 2022)" }, { "ControlTitle":"MITRE ATT&CK T1114.002", @@ -13633,7 +13633,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1546", - "ControlDescription":"Event Triggered Execution : Adversaries may establish persistence and/or elevate privileges using system mechanisms that trigger execution based on specific events. Various operating systems have means to monitor and subscribe to events such as logons or other user activity such as running specific applications/binaries. Cloud environments may also support various functions and services that monitor and can be invoked in response to specific cloud events.(Citation: Backdooring an AWS account)(Citation: Varonis Power Automate Data Exfiltration)(Citation: Microsoft DART Case Report 001)Adversaries may abuse these mechanisms as a means of maintaining persistent access to a victim via repeatedly executing malicious code. After gaining access to a victim system, adversaries may create/modify event triggers to point to malicious content that will be executed whenever the event trigger is invoked.(Citation: FireEye WMI 2015)(Citation: Malware Persistence on OS X)(Citation: amnesia malware)Since the execution can be proxied by an account with higher permissions, such as SYSTEM or service accounts, an adversary may be able to abuse these triggered execution mechanisms to escalate their privileges. " + "ControlDescription":"Event Triggered Execution : Adversaries may establish persistence and/or elevate privileges using system mechanisms that trigger execution based on specific events. Various operating systems have means to monitor and subscribe to events such as logons or other user activity such as running specific applications/binaries. Cloud environments may also support various functions and services that monitor and can be invoked in response to specific cloud events.(Citation: Backdooring an AWS account)(Citation: Varonis Power Automate Data Exfiltration)(Citation: Microsoft DART Case Report 001)Adversaries may abuse these mechanisms as a means of maintaining persistent access to a victim via repeatedly executing malicious code. After gaining access to a victim system, adversaries may create/modify event triggers to point to malicious content that will be executed whenever the event trigger is invoked.(Citation: FireEye WMI 2015)(Citation: Malware Persistence on OS X)(Citation: amnesia malware)Since the execution can be proxied by an account with higher permissions, such as SYSTEM or service accounts, an adversary may be able to abuse these triggered execution mechanisms to escalate their privileges." }, { "ControlTitle":"MITRE ATT&CK T1546.004", @@ -13661,7 +13661,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1553", - "ControlDescription":"Subvert Trust Controls : Adversaries may undermine security controls that will either warn users of untrusted activity or prevent execution of untrusted programs. Operating systems and security products may contain mechanisms to identify programs or websites as possessing some level of trust. 
Examples of such features would include a program being allowed to run because it is signed by a valid code signing certificate, a program prompting the user with a warning because it has an attribute set from being downloaded from the Internet, or getting an indication that you are about to connect to an untrusted site.Adversaries may attempt to subvert these trust mechanisms. The method adversaries use will depend on the specific mechanism they seek to subvert. Adversaries may conduct [File and Directory Permissions Modification](https://attack.mitre.org/techniques/T1222) or [Modify Registry](https://attack.mitre.org/techniques/T1112) in support of subverting these controls.(Citation: SpectorOps Subverting Trust Sept 2017) Adversaries may also create or steal code signing certificates to acquire trust on target systems.(Citation: Securelist Digital Certificates)(Citation: Symantec Digital Certificates) " + "ControlDescription":"Subvert Trust Controls : Adversaries may undermine security controls that will either warn users of untrusted activity or prevent execution of untrusted programs. Operating systems and security products may contain mechanisms to identify programs or websites as possessing some level of trust. Examples of such features would include a program being allowed to run because it is signed by a valid code signing certificate, a program prompting the user with a warning because it has an attribute set from being downloaded from the Internet, or getting an indication that you are about to connect to an untrusted site.Adversaries may attempt to subvert these trust mechanisms. The method adversaries use will depend on the specific mechanism they seek to subvert. Adversaries may conduct [File and Directory Permissions Modification](https://attack.mitre.org/techniques/T1222) or [Modify Registry](https://attack.mitre.org/techniques/T1112) in support of subverting these controls.(Citation: SpectorOps Subverting Trust Sept 2017) Adversaries may also create or steal code signing certificates to acquire trust on target systems.(Citation: Securelist Digital Certificates)(Citation: Symantec Digital Certificates)" }, { "ControlTitle":"MITRE ATT&CK T1548.004", @@ -13725,15 +13725,15 @@ }, { "ControlTitle":"MITRE ATT&CK T1546.015", - "ControlDescription":"Component Object Model Hijacking : Adversaries may establish persistence by executing malicious content triggered by hijacked references to Component Object Model (COM) objects. COM is a system within Windows to enable interaction between software components through the operating system.(Citation: Microsoft Component Object Model) References to various COM objects are stored in the Registry. Adversaries can use the COM system to insert malicious code that can be executed in place of legitimate software through hijacking the COM references and relationships as a means for persistence. Hijacking a COM object requires a change in the Registry to replace a reference to a legitimate system component which may cause that component to not work when executed. When that system component is executed through normal system operation the adversary's code will be executed instead.(Citation: GDATA COM Hijacking) An adversary is likely to hijack objects that are used frequently enough to maintain a consistent level of persistence, but are unlikely to break noticeable functionality within the system as to avoid system instability that could lead to detection. 
" + "ControlDescription":"Component Object Model Hijacking : Adversaries may establish persistence by executing malicious content triggered by hijacked references to Component Object Model (COM) objects. COM is a system within Windows to enable interaction between software components through the operating system.(Citation: Microsoft Component Object Model) References to various COM objects are stored in the Registry. Adversaries can use the COM system to insert malicious code that can be executed in place of legitimate software through hijacking the COM references and relationships as a means for persistence. Hijacking a COM object requires a change in the Registry to replace a reference to a legitimate system component which may cause that component to not work when executed. When that system component is executed through normal system operation the adversary's code will be executed instead.(Citation: GDATA COM Hijacking) An adversary is likely to hijack objects that are used frequently enough to maintain a consistent level of persistence, but are unlikely to break noticeable functionality within the system as to avoid system instability that could lead to detection." }, { "ControlTitle":"MITRE ATT&CK T1589.001", - "ControlDescription":"Credentials : Adversaries may gather credentials that can be used during targeting. Account credentials gathered by adversaries may be those directly associated with the target victim organization or attempt to take advantage of the tendency for users to use the same passwords across personal and business accounts.Adversaries may gather credentials from potential victims in various ways, such as direct elicitation via [Phishing for Information](https://attack.mitre.org/techniques/T1598). Adversaries may also compromise sites then add malicious content designed to collect website authentication cookies from visitors.(Citation: ATT ScanBox) Credential information may also be exposed to adversaries via leaks to online or other accessible data sets (ex: [Search Engines](https://attack.mitre.org/techniques/T1593/002), breach dumps, code repositories, etc.).(Citation: Register Deloitte)(Citation: Register Uber)(Citation: Detectify Slack Tokens)(Citation: Forbes GitHub Creds)(Citation: GitHub truffleHog)(Citation: GitHub Gitrob)(Citation: CNET Leaks) Adversaries may also purchase credentials from dark web or other black-markets. Finally, where multi-factor authentication (MFA) based on out-of-band communications is in use, adversaries may compromise a service provider to gain access to MFA codes and one-time passwords (OTP).(Citation: Okta Scatter Swine 2022)Gathering this information may reveal opportunities for other forms of reconnaissance (ex: [Search Open Websites/Domains](https://attack.mitre.org/techniques/T1593) or [Phishing for Information](https://attack.mitre.org/techniques/T1598)), establishing operational resources (ex: [Compromise Accounts](https://attack.mitre.org/techniques/T1586)), and/or initial access (ex: [External Remote Services](https://attack.mitre.org/techniques/T1133) or [Valid Accounts](https://attack.mitre.org/techniques/T1078)). " + "ControlDescription":"Credentials : Adversaries may gather credentials that can be used during targeting. 
Account credentials gathered by adversaries may be those directly associated with the target victim organization or attempt to take advantage of the tendency for users to use the same passwords across personal and business accounts.Adversaries may gather credentials from potential victims in various ways, such as direct elicitation via [Phishing for Information](https://attack.mitre.org/techniques/T1598). Adversaries may also compromise sites then add malicious content designed to collect website authentication cookies from visitors.(Citation: ATT ScanBox) Credential information may also be exposed to adversaries via leaks to online or other accessible data sets (ex: [Search Engines](https://attack.mitre.org/techniques/T1593/002), breach dumps, code repositories, etc.).(Citation: Register Deloitte)(Citation: Register Uber)(Citation: Detectify Slack Tokens)(Citation: Forbes GitHub Creds)(Citation: GitHub truffleHog)(Citation: GitHub Gitrob)(Citation: CNET Leaks) Adversaries may also purchase credentials from dark web or other black-markets. Finally, where multi-factor authentication (MFA) based on out-of-band communications is in use, adversaries may compromise a service provider to gain access to MFA codes and one-time passwords (OTP).(Citation: Okta Scatter Swine 2022)Gathering this information may reveal opportunities for other forms of reconnaissance (ex: [Search Open Websites/Domains](https://attack.mitre.org/techniques/T1593) or [Phishing for Information](https://attack.mitre.org/techniques/T1598)), establishing operational resources (ex: [Compromise Accounts](https://attack.mitre.org/techniques/T1586)), and/or initial access (ex: [External Remote Services](https://attack.mitre.org/techniques/T1133) or [Valid Accounts](https://attack.mitre.org/techniques/T1078))." }, { "ControlTitle":"MITRE ATT&CK T1195.002", - "ControlDescription":"Compromise Software Supply Chain : Adversaries may manipulate application software prior to receipt by a final consumer for the purpose of data or system compromise. Supply chain compromise of software can take place in a number of ways, including manipulation of the application source code, manipulation of the update/distribution mechanism for that software, or replacing compiled releases with a modified version.Targeting may be specific to a desired victim set or may be distributed to a broad set of consumers but only move on to additional tactics on specific victims.(Citation: Avast CCleaner3 2018)(Citation: Command Five SK 2011) " + "ControlDescription":"Compromise Software Supply Chain : Adversaries may manipulate application software prior to receipt by a final consumer for the purpose of data or system compromise. Supply chain compromise of software can take place in a number of ways, including manipulation of the application source code, manipulation of the update/distribution mechanism for that software, or replacing compiled releases with a modified version.Targeting may be specific to a desired victim set or may be distributed to a broad set of consumers but only move on to additional tactics on specific victims.(Citation: Avast CCleaner3 2018)(Citation: Command Five SK 2011) " }, { "ControlTitle":"MITRE ATT&CK T1036.003", @@ -13741,7 +13741,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1102.002", - "ControlDescription":"Bidirectional Communication : Adversaries may use an existing, legitimate external Web service as a means for sending commands to and receiving output from a compromised system over the Web service channel. 
Compromised systems may leverage popular websites and social media to host command and control (C2) instructions. Those infected systems can then send the output from those commands back over that Web service channel. The return traffic may occur in a variety of ways, depending on the Web service being utilized. For example, the return traffic may take the form of the compromised system posting a comment on a forum, issuing a pull request to development project, updating a document hosted on a Web service, or by sending a Tweet. Popular websites and social media acting as a mechanism for C2 may give a significant amount of cover due to the likelihood that hosts within a network are already communicating with them prior to a compromise. Using common services, such as those offered by Google or Twitter, makes it easier for adversaries to hide in expected noise. Web service providers commonly use SSL/TLS encryption, giving adversaries an added level of protection. " + "ControlDescription":"Bidirectional Communication : Adversaries may use an existing, legitimate external Web service as a means for sending commands to and receiving output from a compromised system over the Web service channel. Compromised systems may leverage popular websites and social media to host command and control (C2) instructions. Those infected systems can then send the output from those commands back over that Web service channel. The return traffic may occur in a variety of ways, depending on the Web service being utilized. For example, the return traffic may take the form of the compromised system posting a comment on a forum, issuing a pull request to development project, updating a document hosted on a Web service, or by sending a Tweet. Popular websites and social media acting as a mechanism for C2 may give a significant amount of cover due to the likelihood that hosts within a network are already communicating with them prior to a compromise. Using common services, such as those offered by Google or Twitter, makes it easier for adversaries to hide in expected noise. Web service providers commonly use SSL/TLS encryption, giving adversaries an added level of protection." }, { "ControlTitle":"MITRE ATT&CK T1203", @@ -13749,7 +13749,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1595.003", - "ControlDescription":"Wordlist Scanning : Adversaries may iteratively probe infrastructure using brute-forcing and crawling techniques. While this technique employs similar methods to [Brute Force](https://attack.mitre.org/techniques/T1110), its goal is the identification of content and infrastructure rather than the discovery of valid credentials. Wordlists used in these scans may contain generic, commonly used names and file extensions or terms specific to a particular software. 
Adversaries may also create custom, target-specific wordlists using data gathered from other Reconnaissance techniques (ex: [Gather Victim Org Information](https://attack.mitre.org/techniques/T1591), or [Search Victim-Owned Websites](https://attack.mitre.org/techniques/T1594)).For example, adversaries may use web content discovery tools such as Dirb, DirBuster, and GoBuster and generic or custom wordlists to enumerate a website's pages and directories.(Citation: ClearSky Lebanese Cedar Jan 2021) This can help them to discover old, vulnerable pages or hidden administrative portals that could become the target of further operations (ex: [Exploit Public-Facing Application](https://attack.mitre.org/techniques/T1190) or [Brute Force](https://attack.mitre.org/techniques/T1110)). As cloud storage solutions typically use globally unique names, adversaries may also use target-specific wordlists and tools such as s3recon and GCPBucketBrute to enumerate public and private buckets on cloud infrastructure.(Citation: S3Recon GitHub)(Citation: GCPBucketBrute) Once storage objects are discovered, adversaries may leverage [Data from Cloud Storage](https://attack.mitre.org/techniques/T1530) to access valuable information that can be exfiltrated or used to escalate privileges and move laterally. " + "ControlDescription":"Wordlist Scanning : Adversaries may iteratively probe infrastructure using brute-forcing and crawling techniques. While this technique employs similar methods to [Brute Force](https://attack.mitre.org/techniques/T1110), its goal is the identification of content and infrastructure rather than the discovery of valid credentials. Wordlists used in these scans may contain generic, commonly used names and file extensions or terms specific to a particular software. Adversaries may also create custom, target-specific wordlists using data gathered from other Reconnaissance techniques (ex: [Gather Victim Org Information](https://attack.mitre.org/techniques/T1591), or [Search Victim-Owned Websites](https://attack.mitre.org/techniques/T1594)).For example, adversaries may use web content discovery tools such as Dirb, DirBuster, and GoBuster and generic or custom wordlists to enumerate a website's pages and directories.(Citation: ClearSky Lebanese Cedar Jan 2021) This can help them to discover old, vulnerable pages or hidden administrative portals that could become the target of further operations (ex: [Exploit Public-Facing Application](https://attack.mitre.org/techniques/T1190) or [Brute Force](https://attack.mitre.org/techniques/T1110)). As cloud storage solutions typically use globally unique names, adversaries may also use target-specific wordlists and tools such as s3recon and GCPBucketBrute to enumerate public and private buckets on cloud infrastructure.(Citation: S3Recon GitHub)(Citation: GCPBucketBrute) Once storage objects are discovered, adversaries may leverage [Data from Cloud Storage](https://attack.mitre.org/techniques/T1530) to access valuable information that can be exfiltrated or used to escalate privileges and move laterally." }, { "ControlTitle":"MITRE ATT&CK T1562.011", @@ -13765,7 +13765,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1567.002", - "ControlDescription":"Exfiltration to Cloud Storage : Adversaries may exfiltrate data to a cloud storage service rather than over their primary command and control channel. 
Cloud storage services allow for the storage, edit, and retrieval of data from a remote cloud storage server over the Internet.Examples of cloud storage services include Dropbox and Google Docs. Exfiltration to these cloud storage services can provide a significant amount of cover to the adversary if hosts within the network are already communicating with the service. " + "ControlDescription":"Exfiltration to Cloud Storage : Adversaries may exfiltrate data to a cloud storage service rather than over their primary command and control channel. Cloud storage services allow for the storage, edit, and retrieval of data from a remote cloud storage server over the Internet.Examples of cloud storage services include Dropbox and Google Docs. Exfiltration to these cloud storage services can provide a significant amount of cover to the adversary if hosts within the network are already communicating with the service." }, { "ControlTitle":"MITRE ATT&CK T1570", @@ -13817,7 +13817,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1027.003", - "ControlDescription":"Steganography : Adversaries may use steganography techniques in order to prevent the detection of hidden information. Steganographic techniques can be used to hide data in digital media such as images, audio tracks, video clips, or text files.[Duqu](https://attack.mitre.org/software/S0038) was an early example of malware that used steganography. It encrypted the gathered information from a victim's system and hid it within an image before exfiltrating the image to a C2 server.(Citation: Wikipedia Duqu) By the end of 2017, a threat group used\u202fInvoke-PSImage\u202fto hide [PowerShell](https://attack.mitre.org/techniques/T1059/001) commands in an image file (.png) and execute the code on a victim's system. In this particular case the [PowerShell](https://attack.mitre.org/techniques/T1059/001) code downloaded another obfuscated script to gather intelligence from the victim's machine and communicate it back to the adversary.(Citation: McAfee Malicious Doc Targets Pyeongchang Olympics) " + "ControlDescription":"Steganography : Adversaries may use steganography techniques in order to prevent the detection of hidden information. Steganographic techniques can be used to hide data in digital media such as images, audio tracks, video clips, or text files.[Duqu](https://attack.mitre.org/software/S0038) was an early example of malware that used steganography. It encrypted the gathered information from a victim's system and hid it within an image before exfiltrating the image to a C2 server.(Citation: Wikipedia Duqu) By the end of 2017, a threat group used\u202fInvoke-PSImage\u202fto hide [PowerShell](https://attack.mitre.org/techniques/T1059/001) commands in an image file (.png) and execute the code on a victim's system. In this particular case the [PowerShell](https://attack.mitre.org/techniques/T1059/001) code downloaded another obfuscated script to gather intelligence from the victim's machine and communicate it back to the adversary.(Citation: McAfee Malicious Doc Targets Pyeongchang Olympics) " }, { "ControlTitle":"MITRE ATT&CK T1584.002", @@ -13825,7 +13825,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1001.003", - "ControlDescription":"Protocol Impersonation : Adversaries may impersonate legitimate protocols or web service traffic to disguise command and control activity and thwart analysis efforts. By impersonating legitimate protocols or web services, adversaries can make their command and control traffic blend in with legitimate network traffic. 
Adversaries may impersonate a fake SSL/TLS handshake to make it look like subsequent traffic is SSL/TLS encrypted, potentially interfering with some security tooling, or to make the traffic look like it is related with a trusted entity. " + "ControlDescription":"Protocol Impersonation : Adversaries may impersonate legitimate protocols or web service traffic to disguise command and control activity and thwart analysis efforts. By impersonating legitimate protocols or web services, adversaries can make their command and control traffic blend in with legitimate network traffic. Adversaries may impersonate a fake SSL/TLS handshake to make it look like subsequent traffic is SSL/TLS encrypted, potentially interfering with some security tooling, or to make the traffic look like it is related with a trusted entity." }, { "ControlTitle":"MITRE ATT&CK T1012", @@ -13857,7 +13857,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1506", - "ControlDescription":"Web Session Cookie : Adversaries can use stolen session cookies to authenticate to web applications and services. This technique bypasses some multi-factor authentication protocols since the session is already authenticated.(Citation: Pass The Cookie)Authentication cookies are commonly used in web applications, including cloud-based services, after a user has authenticated to the service so credentials are not passed and re-authentication does not need to occur as frequently. Cookies are often valid for an extended period of time, even if the web application is not actively used. After the cookie is obtained through [Steal Web Session Cookie](https://attack.mitre.org/techniques/T1539), the adversary then imports the cookie into a browser they control and is able to use the site or application as the user for as long as the session cookie is active. Once logged into the site, an adversary can access sensitive information, read email, or perform actions that the victim account has permissions to perform.There have been examples of malware targeting session cookies to bypass multi-factor authentication systems.(Citation: Unit 42 Mac Crypto Cookies January 2019) " + "ControlDescription":"Web Session Cookie : Adversaries can use stolen session cookies to authenticate to web applications and services. This technique bypasses some multi-factor authentication protocols since the session is already authenticated.(Citation: Pass The Cookie)Authentication cookies are commonly used in web applications, including cloud-based services, after a user has authenticated to the service so credentials are not passed and re-authentication does not need to occur as frequently. Cookies are often valid for an extended period of time, even if the web application is not actively used. After the cookie is obtained through [Steal Web Session Cookie](https://attack.mitre.org/techniques/T1539), the adversary then imports the cookie into a browser they control and is able to use the site or application as the user for as long as the session cookie is active. 
Once logged into the site, an adversary can access sensitive information, read email, or perform actions that the victim account has permissions to perform.There have been examples of malware targeting session cookies to bypass multi-factor authentication systems.(Citation: Unit 42 Mac Crypto Cookies January 2019)" }, { "ControlTitle":"MITRE ATT&CK T1553.004", @@ -14001,7 +14001,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1543.001", - "ControlDescription":"Launch Agent : Adversaries may create or modify launch agents to repeatedly execute malicious payloads as part of persistence. When a user logs in, a per-user launchd process is started which loads the parameters for each launch-on-demand user agent from the property list (.plist) file found in /System/Library/LaunchAgents, /Library/LaunchAgents, and ~/Library/LaunchAgents.(Citation: AppleDocs Launch Agent Daemons)(Citation: OSX Keydnap malware) (Citation: Antiquated Mac Malware) Property list files use the Label, ProgramArguments , and RunAtLoad keys to identify the Launch Agent's name, executable location, and execution time.(Citation: OSX.Dok Malware) Launch Agents are often installed to perform updates to programs, launch user specified programs at login, or to conduct other developer tasks. Launch Agents can also be executed using the [Launchctl](https://attack.mitre.org/techniques/T1569/001) command.\n \nAdversaries may install a new Launch Agent that executes at login by placing a .plist file into the appropriate folders with the RunAtLoad or KeepAlive keys set to true.(Citation: Sofacy Komplex Trojan)(Citation: Methods of Mac Malware Persistence) The Launch Agent name may be disguised by using a name from the related operating system or benign software. Launch Agents are created with user level privileges and execute with user level permissions.(Citation: OSX Malware Detection)(Citation: OceanLotus for OS X) " + "ControlDescription":"Launch Agent : Adversaries may create or modify launch agents to repeatedly execute malicious payloads as part of persistence. When a user logs in, a per-user launchd process is started which loads the parameters for each launch-on-demand user agent from the property list (.plist) file found in /System/Library/LaunchAgents, /Library/LaunchAgents, and ~/Library/LaunchAgents.(Citation: AppleDocs Launch Agent Daemons)(Citation: OSX Keydnap malware) (Citation: Antiquated Mac Malware) Property list files use the Label, ProgramArguments , and RunAtLoad keys to identify the Launch Agent's name, executable location, and execution time.(Citation: OSX.Dok Malware) Launch Agents are often installed to perform updates to programs, launch user specified programs at login, or to conduct other developer tasks. Launch Agents can also be executed using the [Launchctl](https://attack.mitre.org/techniques/T1569/001) command.\n \nAdversaries may install a new Launch Agent that executes at login by placing a .plist file into the appropriate folders with the RunAtLoad or KeepAlive keys set to true.(Citation: Sofacy Komplex Trojan)(Citation: Methods of Mac Malware Persistence) The Launch Agent name may be disguised by using a name from the related operating system or benign software. 
Launch Agents are created with user level privileges and execute with user level permissions.(Citation: OSX Malware Detection)(Citation: OceanLotus for OS X)" }, { "ControlTitle":"MITRE ATT&CK T1569", @@ -14013,7 +14013,7 @@ }, { "ControlTitle":"MITRE ATT&CK T1055.009", - "ControlDescription":"Proc Memory : Adversaries may inject malicious code into processes via the /proc filesystem in order to evade process-based defenses as well as possibly elevate privileges. Proc memory injection is a method of executing arbitrary code in the address space of a separate live process. Proc memory injection involves enumerating the memory of a process via the /proc filesystem (/proc/[pid]) then crafting a return-oriented programming (ROP) payload with available gadgets/instructions. Each running process has its own directory, which includes memory mappings. Proc memory injection is commonly performed by overwriting the target processes' stack using memory mappings provided by the /proc filesystem. This information can be used to enumerate offsets (including the stack) and gadgets (or instructions within the program that can be used to build a malicious payload) otherwise hidden by process memory protections such as address space layout randomization (ASLR). Once enumerated, the target processes' memory map within /proc/[pid]/maps can be overwritten using dd.(Citation: Uninformed Needle)(Citation: GDS Linux Injection)(Citation: DD Man) Other techniques such as [Dynamic Linker Hijacking](https://attack.mitre.org/techniques/T1574/006) may be used to populate a target process with more available gadgets. Similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012), proc memory injection may target child processes (such as a backgrounded copy of sleep).(Citation: GDS Linux Injection) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via proc memory injection may also evade detection from security products since the execution is masked under a legitimate process. " + "ControlDescription":"Proc Memory : Adversaries may inject malicious code into processes via the /proc filesystem in order to evade process-based defenses as well as possibly elevate privileges. Proc memory injection is a method of executing arbitrary code in the address space of a separate live process. Proc memory injection involves enumerating the memory of a process via the /proc filesystem (/proc/[pid]) then crafting a return-oriented programming (ROP) payload with available gadgets/instructions. Each running process has its own directory, which includes memory mappings. Proc memory injection is commonly performed by overwriting the target processes' stack using memory mappings provided by the /proc filesystem. This information can be used to enumerate offsets (including the stack) and gadgets (or instructions within the program that can be used to build a malicious payload) otherwise hidden by process memory protections such as address space layout randomization (ASLR). Once enumerated, the target processes' memory map within /proc/[pid]/maps can be overwritten using dd.(Citation: Uninformed Needle)(Citation: GDS Linux Injection)(Citation: DD Man) Other techniques such as [Dynamic Linker Hijacking](https://attack.mitre.org/techniques/T1574/006) may be used to populate a target process with more available gadgets. 
Similar to [Process Hollowing](https://attack.mitre.org/techniques/T1055/012), proc memory injection may target child processes (such as a backgrounded copy of sleep).(Citation: GDS Linux Injection) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via proc memory injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1223",
@@ -14025,7 +14025,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1601.001",
- "ControlDescription":"Patch System Image : Adversaries may modify the operating system of a network device to introduce new capabilities or weaken existing defenses.(Citation: Killing the myth of Cisco IOS rootkits) (Citation: Killing IOS diversity myth) (Citation: Cisco IOS Shellcode) (Citation: Cisco IOS Forensics Developments) (Citation: Juniper Netscreen of the Dead) Some network devices are built with a monolithic architecture, where the entire operating system and most of the functionality of the device is contained within a single file. Adversaries may change this file in storage, to be loaded in a future boot, or in memory during runtime.To change the operating system in storage, the adversary will typically use the standard procedures available to device operators. This may involve downloading a new file via typical protocols used on network devices, such as TFTP, FTP, SCP, or a console connection. The original file may be overwritten, or a new file may be written alongside of it and the device reconfigured to boot to the compromised image.To change the operating system in memory, the adversary typically can use one of two methods. In the first, the adversary would make use of native debug commands in the original, unaltered running operating system that allow them to directly modify the relevant memory addresses containing the running operating system. This method typically requires administrative level access to the device.In the second method for changing the operating system in memory, the adversary would make use of the boot loader. The boot loader is the first piece of software that loads when the device starts that, in turn, will launch the operating system. Adversaries may use malicious code previously implanted in the boot loader, such as through the [ROMMONkit](https://attack.mitre.org/techniques/T1542/004) method, to directly manipulate running operating system code in memory. This malicious code in the bootloader provides the capability of direct memory manipulation to the adversary, allowing them to patch the live operating system during runtime.By modifying the instructions stored in the system image file, adversaries may either weaken existing defenses or provision new capabilities that the device did not have before. Examples of existing defenses that can be impeded include encryption, via [Weaken Encryption](https://attack.mitre.org/techniques/T1600), authentication, via [Network Device Authentication](https://attack.mitre.org/techniques/T1556/004), and perimeter defenses, via [Network Boundary Bridging](https://attack.mitre.org/techniques/T1599). Adding new capabilities for the adversary's purpose include [Keylogging](https://attack.mitre.org/techniques/T1056/001), [Multi-hop Proxy](https://attack.mitre.org/techniques/T1090/003), and [Port Knocking](https://attack.mitre.org/techniques/T1205/001).Adversaries may also compromise existing commands in the operating system to produce false output to mislead defenders. When this method is used in conjunction with [Downgrade System Image](https://attack.mitre.org/techniques/T1601/002), one example of a compromised system command may include changing the output of the command that shows the version of the currently running operating system. By patching the operating system, the adversary can change this command to instead display the original, higher revision number that they replaced through the system downgrade. When the operating system is patched in storage, this can be achieved in either the resident storage (typically a form of flash memory, which is non-volatile) or via [TFTP Boot](https://attack.mitre.org/techniques/T1542/005). When the technique is performed on the running operating system in memory and not on the stored copy, this technique will not survive across reboots. However, live memory modification of the operating system can be combined with [ROMMONkit](https://attack.mitre.org/techniques/T1542/004) to achieve persistence. "
+ "ControlDescription":"Patch System Image : Adversaries may modify the operating system of a network device to introduce new capabilities or weaken existing defenses.(Citation: Killing the myth of Cisco IOS rootkits) (Citation: Killing IOS diversity myth) (Citation: Cisco IOS Shellcode) (Citation: Cisco IOS Forensics Developments) (Citation: Juniper Netscreen of the Dead) Some network devices are built with a monolithic architecture, where the entire operating system and most of the functionality of the device is contained within a single file. Adversaries may change this file in storage, to be loaded in a future boot, or in memory during runtime.To change the operating system in storage, the adversary will typically use the standard procedures available to device operators. This may involve downloading a new file via typical protocols used on network devices, such as TFTP, FTP, SCP, or a console connection. The original file may be overwritten, or a new file may be written alongside of it and the device reconfigured to boot to the compromised image.To change the operating system in memory, the adversary typically can use one of two methods. In the first, the adversary would make use of native debug commands in the original, unaltered running operating system that allow them to directly modify the relevant memory addresses containing the running operating system. This method typically requires administrative level access to the device.In the second method for changing the operating system in memory, the adversary would make use of the boot loader. The boot loader is the first piece of software that loads when the device starts that, in turn, will launch the operating system. Adversaries may use malicious code previously implanted in the boot loader, such as through the [ROMMONkit](https://attack.mitre.org/techniques/T1542/004) method, to directly manipulate running operating system code in memory. This malicious code in the bootloader provides the capability of direct memory manipulation to the adversary, allowing them to patch the live operating system during runtime.By modifying the instructions stored in the system image file, adversaries may either weaken existing defenses or provision new capabilities that the device did not have before. Examples of existing defenses that can be impeded include encryption, via [Weaken Encryption](https://attack.mitre.org/techniques/T1600), authentication, via [Network Device Authentication](https://attack.mitre.org/techniques/T1556/004), and perimeter defenses, via [Network Boundary Bridging](https://attack.mitre.org/techniques/T1599). Adding new capabilities for the adversary's purpose include [Keylogging](https://attack.mitre.org/techniques/T1056/001), [Multi-hop Proxy](https://attack.mitre.org/techniques/T1090/003), and [Port Knocking](https://attack.mitre.org/techniques/T1205/001).Adversaries may also compromise existing commands in the operating system to produce false output to mislead defenders. When this method is used in conjunction with [Downgrade System Image](https://attack.mitre.org/techniques/T1601/002), one example of a compromised system command may include changing the output of the command that shows the version of the currently running operating system. By patching the operating system, the adversary can change this command to instead display the original, higher revision number that they replaced through the system downgrade. When the operating system is patched in storage, this can be achieved in either the resident storage (typically a form of flash memory, which is non-volatile) or via [TFTP Boot](https://attack.mitre.org/techniques/T1542/005). When the technique is performed on the running operating system in memory and not on the stored copy, this technique will not survive across reboots. However, live memory modification of the operating system can be combined with [ROMMONkit](https://attack.mitre.org/techniques/T1542/004) to achieve persistence."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1558.002",
@@ -14069,7 +14069,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1132.002",
- "ControlDescription":"Non-Standard Encoding : Adversaries may encode data with a non-standard data encoding system to make the content of command and control traffic more difficult to detect. Command and control (C2) information can be encoded using a non-standard data encoding system that diverges from existing protocol specifications. Non-standard data encoding schemes may be based on or related to standard data encoding schemes, such as a modified Base64 encoding for the message body of an HTTP request.(Citation: Wikipedia Binary-to-text Encoding) (Citation: Wikipedia Character Encoding) "
+ "ControlDescription":"Non-Standard Encoding : Adversaries may encode data with a non-standard data encoding system to make the content of command and control traffic more difficult to detect. Command and control (C2) information can be encoded using a non-standard data encoding system that diverges from existing protocol specifications. Non-standard data encoding schemes may be based on or related to standard data encoding schemes, such as a modified Base64 encoding for the message body of an HTTP request.(Citation: Wikipedia Binary-to-text Encoding) (Citation: Wikipedia Character Encoding)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1556.001",
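The T1132.002 description above turns on the idea of a "modified Base64" alphabet. A minimal Python sketch of what decoding such traffic can look like; the rotated alphabet here is a purely hypothetical example (real encoders pick their own substitutions), not something taken from this patch:

```python
import base64

# Standard Base64 alphabet and a hypothetical non-standard variant
# (a simple rotation). Both are illustration-only assumptions.
STANDARD = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
CUSTOM = STANDARD[13:] + STANDARD[:13]

def encode_custom_b64(raw: bytes) -> str:
    """Encode with standard Base64, then remap into the custom alphabet."""
    table = str.maketrans(STANDARD, CUSTOM)
    return base64.b64encode(raw).decode().translate(table)

def decode_custom_b64(data: str) -> bytes:
    """Map the custom alphabet back to the standard one, then decode."""
    table = str.maketrans(CUSTOM, STANDARD)
    return base64.b64decode(data.translate(table))

# Round trip: the '=' padding is untouched because it is in neither table.
assert decode_custom_b64(encode_custom_b64(b"beacon checkin")) == b"beacon checkin"
```

The point of the remapping step is exactly what the description notes: the wire bytes no longer match the standard alphabet, so naive Base64 detectors miss them, while the scheme remains trivially reversible for anyone who knows the substitution.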
@@ -14077,7 +14077,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1537",
- "ControlDescription":"Transfer Data to Cloud Account : Adversaries may exfiltrate data by transferring the data, including backups of cloud environments, to another cloud account they control on the same service to avoid typical file transfers/downloads and network-based exfiltration detection.A defender who is monitoring for large transfers to outside the cloud environment through normal file transfers or over command and control channels may not be watching for data transfers to another account within the same cloud provider. Such transfers may utilize existing cloud provider APIs and the internal address space of the cloud provider to blend into normal traffic or avoid data transfers over external network interfaces.Incidents have been observed where adversaries have created backups of cloud instances and transferred them to separate accounts.(Citation: DOJ GRU Indictment Jul 2018) "
+ "ControlDescription":"Transfer Data to Cloud Account : Adversaries may exfiltrate data by transferring the data, including backups of cloud environments, to another cloud account they control on the same service to avoid typical file transfers/downloads and network-based exfiltration detection.A defender who is monitoring for large transfers to outside the cloud environment through normal file transfers or over command and control channels may not be watching for data transfers to another account within the same cloud provider. Such transfers may utilize existing cloud provider APIs and the internal address space of the cloud provider to blend into normal traffic or avoid data transfers over external network interfaces.Incidents have been observed where adversaries have created backups of cloud instances and transferred them to separate accounts.(Citation: DOJ GRU Indictment Jul 2018)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1027.006",
@@ -14153,7 +14153,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1027.002",
- "ControlDescription":"Software Packing : Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) Utilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses.(Citation: Awesome Executable Packing) "
+ "ControlDescription":"Software Packing : Adversaries may perform software packing or virtual machine software protection to conceal their code. Software packing is a method of compressing or encrypting an executable. Packing an executable changes the file signature in an attempt to avoid signature-based detection. Most decompression techniques decompress the executable code in memory. Virtual machine software protection translates an executable's original code into a special format that only a special virtual machine can run. A virtual machine is then called to run this code.(Citation: ESET FinFisher Jan 2018) Utilities used to perform software packing are called packers. Example packers are MPRESS and UPX. A more comprehensive list of known packers is available, but adversaries may create their own packing techniques that do not leave the same artifacts as well-known packers to evade defenses.(Citation: Awesome Executable Packing)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1584.007",
@@ -14161,7 +14161,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1071.001",
- "ControlDescription":"Web Protocols : Adversaries may communicate using application layer protocols associated with web traffic to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as HTTP/S(Citation: CrowdStrike Putter Panda) and WebSocket(Citation: Brazking-Websockets) that carry web traffic may be very common in environments. HTTP/S packets have many fields and headers in which data can be concealed. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic. "
+ "ControlDescription":"Web Protocols : Adversaries may communicate using application layer protocols associated with web traffic to avoid detection/network filtering by blending in with existing traffic. Commands to the remote system, and often the results of those commands, will be embedded within the protocol traffic between the client and server. Protocols such as HTTP/S(Citation: CrowdStrike Putter Panda) and WebSocket(Citation: Brazking-Websockets) that carry web traffic may be very common in environments. HTTP/S packets have many fields and headers in which data can be concealed. An adversary may abuse these protocols to communicate with systems under their control within a victim network while also mimicking normal, expected traffic."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1059.005",
@@ -14173,7 +14173,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1543.002",
- "ControlDescription":"Systemd Service : Adversaries may create or modify systemd services to repeatedly execute malicious payloads as part of persistence. Systemd is a system and service manager commonly used for managing background daemon processes (also known as services) and other system resources.(Citation: Linux man-pages: systemd January 2014) Systemd is the default initialization (init) system on many Linux distributions replacing legacy init systems, including SysVinit and Upstart, while remaining backwards compatible. Systemd utilizes unit configuration files with the `.service` file extension to encode information about a service's process. By default, system level unit files are stored in the `/systemd/system` directory of the root owned directories (`/`). User level unit files are stored in the `/systemd/user` directories of the user owned directories (`$HOME`).(Citation: lambert systemd 2022) Inside the `.service` unit files, the following directives are used to execute commands:(Citation: freedesktop systemd.service) * `ExecStart`, `ExecStartPre`, and `ExecStartPost` directives execute when a service is started manually by `systemctl` or on system start if the service is set to automatically start.\n* `ExecReload` directive executes when a service restarts. \n* `ExecStop`, `ExecStopPre`, and `ExecStopPost` directives execute when a service is stopped. Adversaries have created new service files, altered the commands a `.service` file's directive executes, and modified the user directive a `.service` file executes as, which could result in privilege escalation. Adversaries may also place symbolic links in these directories, enabling systemd to find these payloads regardless of where they reside on the filesystem.(Citation: Anomali Rocke March 2019)(Citation: airwalk backdoor unix systems)(Citation: Rapid7 Service Persistence 22JUNE2016) "
+ "ControlDescription":"Systemd Service : Adversaries may create or modify systemd services to repeatedly execute malicious payloads as part of persistence. Systemd is a system and service manager commonly used for managing background daemon processes (also known as services) and other system resources.(Citation: Linux man-pages: systemd January 2014) Systemd is the default initialization (init) system on many Linux distributions replacing legacy init systems, including SysVinit and Upstart, while remaining backwards compatible. Systemd utilizes unit configuration files with the `.service` file extension to encode information about a service's process. By default, system level unit files are stored in the `/systemd/system` directory of the root owned directories (`/`). User level unit files are stored in the `/systemd/user` directories of the user owned directories (`$HOME`).(Citation: lambert systemd 2022) Inside the `.service` unit files, the following directives are used to execute commands:(Citation: freedesktop systemd.service) * `ExecStart`, `ExecStartPre`, and `ExecStartPost` directives execute when a service is started manually by `systemctl` or on system start if the service is set to automatically start.\n* `ExecReload` directive executes when a service restarts. \n* `ExecStop`, `ExecStopPre`, and `ExecStopPost` directives execute when a service is stopped. Adversaries have created new service files, altered the commands a `.service` file's directive executes, and modified the user directive a `.service` file executes as, which could result in privilege escalation. Adversaries may also place symbolic links in these directories, enabling systemd to find these payloads regardless of where they reside on the filesystem.(Citation: Anomali Rocke March 2019)(Citation: airwalk backdoor unix systems)(Citation: Rapid7 Service Persistence 22JUNE2016)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1563.002",
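Because the T1543.002 description centers on the `Exec*` directives inside `.service` unit files, a small audit-style sketch in Python can make the check concrete. The directory list is an assumption covering common distribution layouts (the description abbreviates them as `/systemd/system` and `/systemd/user`); adjust for your environment:

```python
import glob
import os

# Common unit-file locations; an assumption, not exhaustive.
UNIT_DIRS = [
    "/etc/systemd/system",
    "/usr/lib/systemd/system",
    os.path.expanduser("~/.config/systemd/user"),
]

def exec_directives(path: str) -> list[tuple[str, str]]:
    """Return (directive, command) pairs for the Exec* keys in one unit file."""
    found = []
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            line = line.strip()
            if line.startswith("Exec") and "=" in line:
                key, _, value = line.partition("=")
                found.append((key.strip(), value.strip()))
    return found

# Enumerate every ExecStart/ExecStop/ExecReload/... command so a reviewer can
# spot unexpected payloads or suspicious paths.
for unit_dir in UNIT_DIRS:
    for unit in glob.glob(os.path.join(unit_dir, "*.service")):
        for key, value in exec_directives(unit):
            print(f"{unit}: {key} -> {value}")
```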
@@ -14221,7 +14221,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1055.005",
- "ControlDescription":"Thread Local Storage : Adversaries may inject malicious code into processes via thread local storage (TLS) callbacks in order to evade process-based defenses as well as possibly elevate privileges. TLS callback injection is a method of executing arbitrary code in the address space of a separate live process. TLS callback injection involves manipulating pointers inside a portable executable (PE) to redirect a process to malicious code before reaching the code's legitimate entry point. TLS callbacks are normally used by the OS to setup and/or cleanup data used by threads. Manipulating TLS callbacks may be performed by allocating and writing to specific offsets within a process' memory space using other [Process Injection](https://attack.mitre.org/techniques/T1055) techniques such as [Process Hollowing](https://attack.mitre.org/techniques/T1055/012).(Citation: FireEye TLS Nov 2017)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via TLS callback injection may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Thread Local Storage : Adversaries may inject malicious code into processes via thread local storage (TLS) callbacks in order to evade process-based defenses as well as possibly elevate privileges. TLS callback injection is a method of executing arbitrary code in the address space of a separate live process. TLS callback injection involves manipulating pointers inside a portable executable (PE) to redirect a process to malicious code before reaching the code's legitimate entry point. TLS callbacks are normally used by the OS to setup and/or cleanup data used by threads. Manipulating TLS callbacks may be performed by allocating and writing to specific offsets within a process' memory space using other [Process Injection](https://attack.mitre.org/techniques/T1055) techniques such as [Process Hollowing](https://attack.mitre.org/techniques/T1055/012).(Citation: FireEye TLS Nov 2017)Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via TLS callback injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1622",
@@ -14281,7 +14281,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1055.008",
- "ControlDescription":"Ptrace System Calls : Adversaries may inject malicious code into processes via ptrace (process trace) system calls in order to evade process-based defenses as well as possibly elevate privileges. Ptrace system call injection is a method of executing arbitrary code in the address space of a separate live process. Ptrace system call injection involves attaching to and modifying a running process. The ptrace system call enables a debugging process to observe and control another process (and each individual thread), including changing memory and register values.(Citation: PTRACE man) Ptrace system call injection is commonly performed by writing arbitrary code into a running process (ex: malloc) then invoking that memory with PTRACE_SETREGS to set the register containing the next instruction to execute. Ptrace system call injection can also be done with PTRACE_POKETEXT/PTRACE_POKEDATA, which copy data to a specific address in the target processes' memory (ex: the current address of the next instruction). (Citation: PTRACE man)(Citation: Medium Ptrace JUL 2018) Ptrace system call injection may not be possible targeting processes that are non-child processes and/or have higher-privileges.(Citation: BH Linux Inject) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via ptrace system call injection may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Ptrace System Calls : Adversaries may inject malicious code into processes via ptrace (process trace) system calls in order to evade process-based defenses as well as possibly elevate privileges. Ptrace system call injection is a method of executing arbitrary code in the address space of a separate live process. Ptrace system call injection involves attaching to and modifying a running process. The ptrace system call enables a debugging process to observe and control another process (and each individual thread), including changing memory and register values.(Citation: PTRACE man) Ptrace system call injection is commonly performed by writing arbitrary code into a running process (ex: malloc) then invoking that memory with PTRACE_SETREGS to set the register containing the next instruction to execute. Ptrace system call injection can also be done with PTRACE_POKETEXT/PTRACE_POKEDATA, which copy data to a specific address in the target processes' memory (ex: the current address of the next instruction). (Citation: PTRACE man)(Citation: Medium Ptrace JUL 2018) Ptrace system call injection may not be possible targeting processes that are non-child processes and/or have higher-privileges.(Citation: BH Linux Inject) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via ptrace system call injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1653",
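One defensive angle on the T1055.008 entry: while a process is attached via ptrace, the kernel records the tracer's PID in `/proc/<pid>/status`. A minimal detection sketch in Python, assuming a Linux host (field name `TracerPid` is standard procfs, everything else here is illustrative):

```python
import glob
import re

def traced_processes() -> list[tuple[int, int]]:
    """Return (pid, tracer_pid) pairs for every process currently being traced."""
    traced = []
    for status in glob.glob("/proc/[0-9]*/status"):
        try:
            text = open(status).read()
        except OSError:
            continue  # process exited or access was denied mid-scan
        # TracerPid is 0 when nothing is attached via ptrace.
        match = re.search(r"^TracerPid:\s*(\d+)$", text, re.MULTILINE)
        if match and int(match.group(1)) != 0:
            pid = int(status.split("/")[2])
            traced.append((pid, int(match.group(1))))
    return traced

for pid, tracer in traced_processes():
    print(f"PID {pid} is being traced by PID {tracer}")
```

A non-zero `TracerPid` is not proof of injection (debuggers and profilers attach legitimately), but an unexpected tracer on a production daemon is exactly the signal this description is about.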
@@ -14297,7 +14297,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1037.001",
- "ControlDescription":"Logon Script (Windows) : Adversaries may use Windows logon scripts automatically executed at logon initialization to establish persistence. Windows allows logon scripts to be run whenever a specific user or group of users log into a system.(Citation: TechNet Logon Scripts) This is done via adding a path to a script to the HKCU/Environment/UserInitMprLogonScript Registry key.(Citation: Hexacorn Logon Scripts)Adversaries may use these scripts to maintain persistence on a single system. Depending on the access configuration of the logon scripts, either local credentials or an administrator account may be necessary. "
+ "ControlDescription":"Logon Script (Windows) : Adversaries may use Windows logon scripts automatically executed at logon initialization to establish persistence. Windows allows logon scripts to be run whenever a specific user or group of users log into a system.(Citation: TechNet Logon Scripts) This is done via adding a path to a script to the HKCU/Environment/UserInitMprLogonScript Registry key.(Citation: Hexacorn Logon Scripts)Adversaries may use these scripts to maintain persistence on a single system. Depending on the access configuration of the logon scripts, either local credentials or an administrator account may be necessary."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1055.015",
@@ -14345,11 +14345,11 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1602.001",
- "ControlDescription":"SNMP (MIB Dump) : Adversaries may target the Management Information Base (MIB) to collect and/or mine valuable information in a network managed using Simple Network Management Protocol (SNMP).The MIB is a configuration repository that stores variable information accessible via SNMP in the form of object identifiers (OID). Each OID identifies a variable that can be read or set and permits active management tasks, such as configuration changes, through remote modification of these variables. SNMP can give administrators great insight in their systems, such as, system information, description of hardware, physical location, and software packages(Citation: SANS Information Security Reading Room Securing SNMP Securing SNMP). The MIB may also contain device operational information, including running configuration, routing table, and interface details.Adversaries may use SNMP queries to collect MIB content directly from SNMP-managed devices in order to collect network information that allows the adversary to build network maps and facilitate future targeted exploitation.(Citation: US-CERT-TA18-106A)(Citation: Cisco Blog Legacy Device Attacks) "
+ "ControlDescription":"SNMP (MIB Dump) : Adversaries may target the Management Information Base (MIB) to collect and/or mine valuable information in a network managed using Simple Network Management Protocol (SNMP).The MIB is a configuration repository that stores variable information accessible via SNMP in the form of object identifiers (OID). Each OID identifies a variable that can be read or set and permits active management tasks, such as configuration changes, through remote modification of these variables. SNMP can give administrators great insight in their systems, such as, system information, description of hardware, physical location, and software packages(Citation: SANS Information Security Reading Room Securing SNMP Securing SNMP). The MIB may also contain device operational information, including running configuration, routing table, and interface details.Adversaries may use SNMP queries to collect MIB content directly from SNMP-managed devices in order to collect network information that allows the adversary to build network maps and facilitate future targeted exploitation.(Citation: US-CERT-TA18-106A)(Citation: Cisco Blog Legacy Device Attacks)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1001.002",
- "ControlDescription":"Steganography : Adversaries may use steganographic techniques to hide command and control traffic to make detection efforts more difficult. Steganographic techniques can be used to hide data in digital messages that are transferred between systems. This hidden information can be used for command and control of compromised systems. In some cases, the passing of files embedded using steganography, such as image or document files, can be used for command and control. "
+ "ControlDescription":"Steganography : Adversaries may use steganographic techniques to hide command and control traffic to make detection efforts more difficult. Steganographic techniques can be used to hide data in digital messages that are transferred between systems. This hidden information can be used for command and control of compromised systems. In some cases, the passing of files embedded using steganography, such as image or document files, can be used for command and control."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1204.001",
@@ -14409,7 +14409,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1055.001",
- "ControlDescription":"Dynamic-link Library Injection : Adversaries may inject dynamic-link libraries (DLLs) into processes in order to evade process-based defenses as well as possibly elevate privileges. DLL injection is a method of executing arbitrary code in the address space of a separate live process. DLL injection is commonly performed by writing the path to a DLL in the virtual address space of the target process before loading the DLL by invoking a new thread. The write can be performed with native Windows API calls such as VirtualAllocEx and WriteProcessMemory, then invoked with CreateRemoteThread (which calls the LoadLibrary API responsible for loading the DLL). (Citation: Elastic Process Injection July 2017) Variations of this method such as reflective DLL injection (writing a self-mapping DLL into a process) and memory module (map DLL when writing into process) overcome the address relocation issue as well as the additional APIs to invoke execution (since these methods load and execute the files in memory by manually preforming the function of LoadLibrary).(Citation: Elastic HuntingNMemory June 2017)(Citation: Elastic Process Injection July 2017) Another variation of this method, often referred to as Module Stomping/Overloading or DLL Hollowing, may be leveraged to conceal injected code within a process. This method involves loading a legitimate DLL into a remote process then manually overwriting the module's AddressOfEntryPoint before starting a new thread in the target process.(Citation: Module Stomping for Shellcode Injection) This variation allows attackers to hide malicious injected code by potentially backing its execution with a legitimate DLL file on disk.(Citation: Hiding Malicious Code with Module Stomping) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via DLL injection may also evade detection from security products since the execution is masked under a legitimate process. "
+ "ControlDescription":"Dynamic-link Library Injection : Adversaries may inject dynamic-link libraries (DLLs) into processes in order to evade process-based defenses as well as possibly elevate privileges. DLL injection is a method of executing arbitrary code in the address space of a separate live process. DLL injection is commonly performed by writing the path to a DLL in the virtual address space of the target process before loading the DLL by invoking a new thread. The write can be performed with native Windows API calls such as VirtualAllocEx and WriteProcessMemory, then invoked with CreateRemoteThread (which calls the LoadLibrary API responsible for loading the DLL). (Citation: Elastic Process Injection July 2017) Variations of this method such as reflective DLL injection (writing a self-mapping DLL into a process) and memory module (map DLL when writing into process) overcome the address relocation issue as well as the additional APIs to invoke execution (since these methods load and execute the files in memory by manually preforming the function of LoadLibrary).(Citation: Elastic HuntingNMemory June 2017)(Citation: Elastic Process Injection July 2017) Another variation of this method, often referred to as Module Stomping/Overloading or DLL Hollowing, may be leveraged to conceal injected code within a process. This method involves loading a legitimate DLL into a remote process then manually overwriting the module's AddressOfEntryPoint before starting a new thread in the target process.(Citation: Module Stomping for Shellcode Injection) This variation allows attackers to hide malicious injected code by potentially backing its execution with a legitimate DLL file on disk.(Citation: Hiding Malicious Code with Module Stomping) Running code in the context of another process may allow access to the process's memory, system/network resources, and possibly elevated privileges. Execution via DLL injection may also evade detection from security products since the execution is masked under a legitimate process."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1086",
@@ -14429,7 +14429,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1495",
- "ControlDescription":"Firmware Corruption : Adversaries may overwrite or corrupt the flash memory contents of system BIOS or other firmware in devices attached to a system in order to render them inoperable or unable to boot, thus denying the availability to use the devices and/or the system.(Citation: Symantec Chernobyl W95.CIH) Firmware is software that is loaded and executed from non-volatile memory on hardware devices in order to initialize and manage device functionality. These devices may include the motherboard, hard drive, or video cards.In general, adversaries may manipulate, overwrite, or corrupt firmware in order to deny the use of the system or devices. For example, corruption of firmware responsible for loading the operating system for network devices may render the network devices inoperable.(Citation: dhs_threat_to_net_devices)(Citation: cisa_malware_orgs_ukraine) Depending on the device, this attack may also result in [Data Destruction](https://attack.mitre.org/techniques/T1485). "
+ "ControlDescription":"Firmware Corruption : Adversaries may overwrite or corrupt the flash memory contents of system BIOS or other firmware in devices attached to a system in order to render them inoperable or unable to boot, thus denying the availability to use the devices and/or the system.(Citation: Symantec Chernobyl W95.CIH) Firmware is software that is loaded and executed from non-volatile memory on hardware devices in order to initialize and manage device functionality. These devices may include the motherboard, hard drive, or video cards.In general, adversaries may manipulate, overwrite, or corrupt firmware in order to deny the use of the system or devices. For example, corruption of firmware responsible for loading the operating system for network devices may render the network devices inoperable.(Citation: dhs_threat_to_net_devices)(Citation: cisa_malware_orgs_ukraine) Depending on the device, this attack may also result in [Data Destruction](https://attack.mitre.org/techniques/T1485)."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1490",
@@ -14465,7 +14465,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1001.001",
- "ControlDescription":"Junk Data : Adversaries may add junk data to protocols used for command and control to make detection more difficult. By adding random or meaningless data to the protocols used for command and control, adversaries can prevent trivial methods for decoding, deciphering, or otherwise analyzing the traffic. Examples may include appending/prepending data with junk characters or writing junk characters between significant characters. "
+ "ControlDescription":"Junk Data : Adversaries may add junk data to protocols used for command and control to make detection more difficult. By adding random or meaningless data to the protocols used for command and control, adversaries can prevent trivial methods for decoding, deciphering, or otherwise analyzing the traffic. Examples may include appending/prepending data with junk characters or writing junk characters between significant characters."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1598.001",
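To make the T1001.001 phrase "writing junk characters between significant characters" concrete, here is a toy Python sketch. The fixed one-junk-byte-per-payload-byte stride is an assumption chosen for brevity; real implants use their own, often negotiated, schemes:

```python
import os

def add_junk(payload: bytes) -> bytes:
    """Interleave one random junk byte after every significant byte."""
    return b"".join(bytes([b]) + os.urandom(1) for b in payload)

def strip_junk(padded: bytes) -> bytes:
    """Recover the payload by keeping every other byte (the known stride)."""
    return padded[::2]

wire = add_junk(b"exfil")
assert strip_junk(wire) == b"exfil"
```

Even this trivial scheme defeats naive pattern matching on the wire bytes while costing the peers nothing but a shared convention, which is why the description calls it a way to "prevent trivial methods for decoding".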
@@ -14477,7 +14477,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1552.007",
- "ControlDescription":"Container API : Adversaries may gather credentials via APIs within a containers environment. APIs in these environments, such as the Docker API and Kubernetes APIs, allow a user to remotely manage their container resources and cluster components.(Citation: Docker API)(Citation: Kubernetes API)An adversary may access the Docker API to collect logs that contain credentials to cloud, container, and various other resources in the environment.(Citation: Unit 42 Unsecured Docker Daemons) An adversary with sufficient permissions, such as via a pod's service account, may also use the Kubernetes API to retrieve credentials from the Kubernetes API server. These credentials may include those needed for Docker API authentication or secrets from Kubernetes cluster components. "
+ "ControlDescription":"Container API : Adversaries may gather credentials via APIs within a containers environment. APIs in these environments, such as the Docker API and Kubernetes APIs, allow a user to remotely manage their container resources and cluster components.(Citation: Docker API)(Citation: Kubernetes API)An adversary may access the Docker API to collect logs that contain credentials to cloud, container, and various other resources in the environment.(Citation: Unit 42 Unsecured Docker Daemons) An adversary with sufficient permissions, such as via a pod's service account, may also use the Kubernetes API to retrieve credentials from the Kubernetes API server. These credentials may include those needed for Docker API authentication or secrets from Kubernetes cluster components."
 },
 {
  "ControlTitle":"MITRE ATT&CK T1584.001",
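The exposure behind the T1552.007 citation on unsecured Docker daemons is easy to audit for. A minimal probe sketch, assuming the third-party `requests` package and a placeholder host address; the Docker Engine API's unauthenticated default TCP port (2375) and its `GET /version` endpoint are the only specifics relied on:

```python
import requests  # third-party; assumed installed

host = "10.0.0.5"  # placeholder address of the host being audited
try:
    resp = requests.get(f"http://{host}:2375/version", timeout=3)
    if resp.ok:
        # Any answer here means the daemon accepts unauthenticated requests.
        print("UNSECURED Docker API, engine version:", resp.json().get("Version"))
except requests.ConnectionError:
    print("port 2375 closed or filtered; no unauthenticated Docker API found")
```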
@@ -14485,7 +14485,7 @@
 },
 {
  "ControlTitle":"MITRE ATT&CK T1505.001",
- "ControlDescription":"SQL Stored Procedures : Adversaries may abuse SQL stored procedures to establish persistent access to systems. SQL Stored Procedures are code that can be saved and reused so that database users do not waste time rewriting frequently used SQL queries. Stored procedures can be invoked via SQL statements to the database using the procedure name or via defined events (e.g. when a SQL server application is started/restarted).Adversaries may craft malicious stored procedures that can provide a persistence mechanism in SQL database servers.(Citation: NetSPI Startup Stored Procedures)(Citation: Kaspersky MSSQL Aug 2019) To execute operating system commands through SQL syntax the adversary may have to enable additional functionality, such as xp_cmdshell for MSSQL Server.(Citation: NetSPI Startup Stored Procedures)(Citation: Kaspersky MSSQL Aug 2019)(Citation: Microsoft xp_cmdshell 2017) Microsoft SQL Server can enable common language runtime (CLR) integration. With CLR integration enabled, application developers can write stored procedures using any .NET framework language (e.g. VB .NET, C#, etc.).(Citation: Microsoft CLR Integration 2017) Adversaries may craft or modify CLR assemblies that are linked to stored procedures since these CLR assemblies can be made to execute arbitrary commands.(Citation: NetSPI SQL Server CLR) "
+ "ControlDescription":"SQL Stored Procedures : Adversaries may abuse SQL stored procedures to establish persistent access to systems. SQL Stored Procedures are code that can be saved and reused so that database users do not waste time rewriting frequently used SQL queries. Stored procedures can be invoked via SQL statements to the database using the procedure name or via defined events (e.g. when a SQL server application is started/restarted).Adversaries may craft malicious stored procedures that can provide a persistence mechanism in SQL database servers.(Citation: NetSPI Startup Stored Procedures)(Citation: Kaspersky MSSQL Aug 2019) To execute operating system commands through SQL syntax the adversary may have to enable additional functionality, such as xp_cmdshell for MSSQL Server.(Citation: NetSPI Startup Stored Procedures)(Citation: Kaspersky MSSQL Aug 2019)(Citation: Microsoft xp_cmdshell 2017) Microsoft SQL Server can enable common language runtime (CLR) integration. With CLR integration enabled, application developers can write stored procedures using any .NET framework language (e.g. VB .NET, C#, etc.).(Citation: Microsoft CLR Integration 2017) Adversaries may craft or modify CLR assemblies that are linked to stored procedures since these CLR assemblies can be made to execute arbitrary commands.(Citation: NetSPI SQL Server CLR)"
 },
 {
  "ControlTitle":"MITRE ATT&CK T1556.004",
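The "defined events" persistence path in T1505.001 (procedures that fire when SQL Server restarts) can be enumerated directly. A sketch using the `pyodbc` package; the connection-string values are placeholders, and the only SQL Server specifics relied on are the `sys.procedures` catalog view and the `OBJECTPROPERTY(..., 'ExecIsStartup')` flag:

```python
import pyodbc  # assumes pyodbc and a SQL Server ODBC driver are installed

# Placeholder connection details for illustration only.
conn = pyodbc.connect(
    "DRIVER={ODBC Driver 18 for SQL Server};SERVER=db.example.internal;"
    "DATABASE=master;UID=auditor;PWD=example-password"
)

# ExecIsStartup marks procedures that run automatically at service start,
# which is exactly the persistence hook the description covers. Startup
# procedures live in the master database.
rows = conn.execute(
    "SELECT [name] FROM sys.procedures "
    "WHERE OBJECTPROPERTY(object_id, 'ExecIsStartup') = 1"
).fetchall()

for row in rows:
    print(f"startup stored procedure: {row.name}")
```

Any procedure this returns that was not deliberately registered by a DBA deserves review, as does a SQL Server instance with `xp_cmdshell` unexpectedly enabled.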
(Citation: Cisco Synful Knock Evolution)On embedded devices, downgrading the version typically only requires replacing the operating system file in storage. With most embedded devices, this can be achieved by downloading a copy of the desired version of the operating system file and reconfiguring the device to boot from that file on next system restart. The adversary could then restart the device to implement the change immediately or they could wait until the next time the system restarts.Downgrading the system image to an older versions may allow an adversary to evade defenses by enabling behaviors such as [Weaken Encryption](https://attack.mitre.org/techniques/T1600). Downgrading of a system image can be done on its own, or it can be used in conjunction with [Patch System Image](https://attack.mitre.org/techniques/T1601/001). " + "ControlDescription":"Downgrade System Image : Adversaries may install an older version of the operating system of a network device to weaken security. Older operating system versions on network devices often have weaker encryption ciphers and, in general, fewer/less updated defensive features. (Citation: Cisco Synful Knock Evolution)On embedded devices, downgrading the version typically only requires replacing the operating system file in storage. With most embedded devices, this can be achieved by downloading a copy of the desired version of the operating system file and reconfiguring the device to boot from that file on next system restart. The adversary could then restart the device to implement the change immediately or they could wait until the next time the system restarts.Downgrading the system image to an older versions may allow an adversary to evade defenses by enabling behaviors such as [Weaken Encryption](https://attack.mitre.org/techniques/T1600). Downgrading of a system image can be done on its own, or it can be used in conjunction with [Patch System Image](https://attack.mitre.org/techniques/T1601/001). " }, { "ControlTitle":"MITRE ATT&CK T1078.003", - "ControlDescription":"Local Accounts : Adversaries may obtain and abuse credentials of a local account as a means of gaining Initial Access, Persistence, Privilege Escalation, or Defense Evasion. Local accounts are those configured by an organization for use by users, remote support, services, or for administration on a single system or service.Local Accounts may also be abused to elevate privileges and harvest credentials through [OS Credential Dumping](https://attack.mitre.org/techniques/T1003). Password reuse may allow the abuse of local accounts across a set of machines on a network for the purposes of Privilege Escalation and Lateral Movement. " + "ControlDescription":"Local Accounts : Adversaries may obtain and abuse credentials of a local account as a means of gaining Initial Access, Persistence, Privilege Escalation, or Defense Evasion. Local accounts are those configured by an organization for use by users, remote support, services, or for administration on a single system or service.Local Accounts may also be abused to elevate privileges and harvest credentials through [OS Credential Dumping](https://attack.mitre.org/techniques/T1003). Password reuse may allow the abuse of local accounts across a set of machines on a network for the purposes of Privilege Escalation and Lateral Movement." 
}, { "ControlTitle":"MITRE ATT&CK T1211", @@ -15561,634 +15561,694 @@ }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.1.2", - "ControlDescription": "Ensure that 'Multi-Factor Auth Status' is 'Enabled' for all Privileged Users " + "ControlDescription": "Ensure that 'Multi-Factor Auth Status' is 'Enabled' for all Privileged Users" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.1.3", - "ControlDescription": "Ensure that 'Multi-Factor Auth Status' is 'Enabled' for all Non-Privileged Users " + "ControlDescription": "Ensure that 'Multi-Factor Auth Status' is 'Enabled' for all Non-Privileged Users" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.1.4", - "ControlDescription": "Ensure that 'Allow users to remember multi-factor authentication on devices they trust' is Disabled " + "ControlDescription": "Ensure that 'Allow users to remember multi-factor authentication on devices they trust' is Disabled" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.1", - "ControlDescription": "Ensure Trusted Locations Are Defined " + "ControlDescription": "Ensure Trusted Locations Are Defined" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.2", - "ControlDescription": "Ensure that an exclusionary Geographic Access Policy is considered " + "ControlDescription": "Ensure that an exclusionary Geographic Access Policy is considered" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.3", - "ControlDescription": "Ensure that A Multi-factor Authentication Policy Exists for Administrative Groups " + "ControlDescription": "Ensure that A Multi-factor Authentication Policy Exists for Administrative Groups" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.4", - "ControlDescription": "Ensure that A Multi-factor Authentication Policy Exists for All Users " + "ControlDescription": "Ensure that A Multi-factor Authentication Policy Exists for All Users" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.5", - "ControlDescription": "Ensure Multi-factor Authentication is Required for Risky Sign-ins " + "ControlDescription": "Ensure Multi-factor Authentication is Required for Risky Sign-ins" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2.6", - "ControlDescription": "Ensure Multi-factor Authentication is Required for Azure Management " + "ControlDescription": "Ensure Multi-factor Authentication is Required for Azure Management" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.4", - "ControlDescription": "Ensure Access Review is Set Up for External Users in Azure AD Privileged Identity Management " + "ControlDescription": "Ensure Access Review is Set Up for External Users in Azure AD Privileged Identity Management" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.5", - "ControlDescription": "Ensure Guest Users Are Reviewed on a Regular Basis " + "ControlDescription": "Ensure Guest Users Are Reviewed on a Regular Basis" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.6", - "ControlDescription": "Ensure That 'Number of methods required to reset' is set to '2' " + "ControlDescription": "Ensure That 'Number of methods required to reset' is set to '2'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.7", - "ControlDescription": "Ensure that a Custom Bad Password List is set to 'Enforce' for your Organization " + 
"ControlDescription": "Ensure that a Custom Bad Password List is set to 'Enforce' for your Organization" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.8", - "ControlDescription": "Ensure that 'Number of days before users are asked to re-confirm their authentication information' is not set to '0' " + "ControlDescription": "Ensure that 'Number of days before users are asked to re-confirm their authentication information' is not set to '0'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.9", - "ControlDescription": "Ensure that 'Notify users on password resets?' is set to 'Yes' " + "ControlDescription": "Ensure that 'Notify users on password resets?' is set to 'Yes'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.1", - "ControlDescription": "Ensure That 'Notify all admins when other admins reset their password?' is set to 'Yes' " + "ControlDescription": "Ensure That 'Notify all admins when other admins reset their password?' is set to 'Yes'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.11", - "ControlDescription": "Ensure `User consent for applications` is set to `Do not allow user consent` " + "ControlDescription": "Ensure `User consent for applications` is set to `Do not allow user consent`" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.12", - "ControlDescription": "Ensure \u0081eUser consent for applications\u0081f Is Set To \u0081eAllow for Verified Publishers\u0081f " + "ControlDescription": "Ensure \u0081eUser consent for applications\u0081f Is Set To \u0081eAllow for Verified Publishers\u0081f" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.13", - "ControlDescription": "Ensure that 'Users can add gallery apps to My Apps' is set to 'No' " + "ControlDescription": "Ensure that 'Users can add gallery apps to My Apps' is set to 'No'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.14", - "ControlDescription": "Ensure That \u0081eUsers Can Register Applications\u0081f Is Set to \u0081eNo\u0081f " + "ControlDescription": "Ensure That \u0081eUsers Can Register Applications\u0081f Is Set to \u0081eNo\u0081f" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.15", - "ControlDescription": "Ensure That 'Guest users access restrictions' is set to 'Guest user access is restricted to properties and memberships of their own directory objects' " + "ControlDescription": "Ensure That 'Guest users access restrictions' is set to 'Guest user access is restricted to properties and memberships of their own directory objects'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.16", - "ControlDescription": "Ensure that 'Guest invite restrictions' is set to \"Only users assigned to specific admin roles can invite guest users\" " + "ControlDescription": "Ensure that 'Guest invite restrictions' is set to \"Only users assigned to specific admin roles can invite guest users\"" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.17", - "ControlDescription": "Ensure That 'Restrict access to Azure AD administration portal' is Set to 'Yes' " + "ControlDescription": "Ensure That 'Restrict access to Azure AD administration portal' is Set to 'Yes'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.18", - "ControlDescription": "Ensure that 'Restrict user ability to access groups features in the Access Pane' is Set to 'Yes' " + "ControlDescription": "Ensure 
that 'Restrict user ability to access groups features in the Access Pane' is Set to 'Yes'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.19", - "ControlDescription": "Ensure that 'Users can create security groups in Azure portals, API or PowerShell' is set to 'No' " + "ControlDescription": "Ensure that 'Users can create security groups in Azure portals, API or PowerShell' is set to 'No'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.2", - "ControlDescription": "Ensure that 'Owners can manage group membership requests in the Access Panel' is set to 'No' " + "ControlDescription": "Ensure that 'Owners can manage group membership requests in the Access Panel' is set to 'No'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.21", - "ControlDescription": "Ensure that 'Users can create Microsoft 365 groups in Azure portals, API or PowerShell' is set to 'No' " + "ControlDescription": "Ensure that 'Users can create Microsoft 365 groups in Azure portals, API or PowerShell' is set to 'No'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.22", - "ControlDescription": "Ensure that 'Require Multi-Factor Authentication to register or join devices with Azure AD' is set to 'Yes' " + "ControlDescription": "Ensure that 'Require Multi-Factor Authentication to register or join devices with Azure AD' is set to 'Yes'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.23", - "ControlDescription": "Ensure That No Custom Subscription Administrator Roles Exist " + "ControlDescription": "Ensure That No Custom Subscription Administrator Roles Exist" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.24", - "ControlDescription": "Ensure a Custom Role is Assigned Permissions for Administering Resource Locks " + "ControlDescription": "Ensure a Custom Role is Assigned Permissions for Administering Resource Locks" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 1.25", - "ControlDescription": "Ensure That \u0081eSubscription Entering AAD Directory\u0081f and \u0081eSubscription Leaving AAD Directory\u0081f Is Set To \u0081ePermit No One\u0081f " + "ControlDescription": "Ensure That \u0081eSubscription Entering AAD Directory\u0081f and \u0081eSubscription Leaving AAD Directory\u0081f Is Set To \u0081ePermit No One\u0081f" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.1", - "ControlDescription": "Ensure That Microsoft Defender for Servers Is Set to 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Servers Is Set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.2", - "ControlDescription": "Ensure That Microsoft Defender for App Services Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for App Services Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.3", - "ControlDescription": "Ensure That Microsoft Defender for Databases Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Databases Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.4", - "ControlDescription": "Ensure That Microsoft Defender for Azure SQL Databases Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Azure SQL Databases Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.5", - "ControlDescription": "Ensure That Microsoft 
Defender for SQL Servers on Machines Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for SQL Servers on Machines Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.6", - "ControlDescription": "Ensure That Microsoft Defender for Open-Source Relational Databases Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Open-Source Relational Databases Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.7", - "ControlDescription": "Ensure That Microsoft Defender for Storage Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Storage Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.8", - "ControlDescription": "Ensure That Microsoft Defender for Containers Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Containers Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.9", - "ControlDescription": "Ensure That Microsoft Defender for Azure Cosmos DB Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Azure Cosmos DB Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.10", - "ControlDescription": "Ensure That Microsoft Defender for Key Vault Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Key Vault Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.11", - "ControlDescription": "Ensure That Microsoft Defender for DNS Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for DNS Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.12", - "ControlDescription": "Ensure That Microsoft Defender for Resource Manager Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for Resource Manager Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.13", - "ControlDescription": "Ensure that Microsoft Defender Recommendation for 'Apply system updates' status is 'Completed' " + "ControlDescription": "Ensure that Microsoft Defender Recommendation for 'Apply system updates' status is 'Completed'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.14", - "ControlDescription": "Ensure Any of the ASC Default Policy Settings are Not Set to 'Disabled' " + "ControlDescription": "Ensure Any of the ASC Default Policy Settings are Not Set to 'Disabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.15", - "ControlDescription": "Ensure that Auto provisioning of 'Log Analytics agent for Azure VMs' is Set to 'On' " + "ControlDescription": "Ensure that Auto provisioning of 'Log Analytics agent for Azure VMs' is Set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.16", - "ControlDescription": "Ensure that Auto provisioning of 'Vulnerability assessment for machines' is Set to 'On' " + "ControlDescription": "Ensure that Auto provisioning of 'Vulnerability assessment for machines' is Set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.17", - "ControlDescription": "Ensure that Auto provisioning of 'Microsoft Defender for Containers components' is Set to 'On' " + "ControlDescription": "Ensure that Auto provisioning of 'Microsoft Defender for Containers components' is Set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure 
Foundations Benchmark V2.0.0 2.1.18", - "ControlDescription": "Ensure That 'All users with the following roles' is set to 'Owner' " + "ControlDescription": "Ensure That 'All users with the following roles' is set to 'Owner'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.19", - "ControlDescription": "Ensure 'Additional email addresses' is Configured with a Security Contact Email " + "ControlDescription": "Ensure 'Additional email addresses' is Configured with a Security Contact Email" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.20", - "ControlDescription": "Ensure That 'Notify about alerts with the following severity' is Set to 'High' " + "ControlDescription": "Ensure That 'Notify about alerts with the following severity' is Set to 'High'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.21", - "ControlDescription": "Ensure that Microsoft Defender for Cloud Apps integration with Microsoft Defender for Cloud is Selected " + "ControlDescription": "Ensure that Microsoft Defender for Cloud Apps integration with Microsoft Defender for Cloud is Selected" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.1.22", - "ControlDescription": "Ensure that Microsoft Defender for Endpoint integration with Microsoft Defender for Cloud is selected " + "ControlDescription": "Ensure that Microsoft Defender for Endpoint integration with Microsoft Defender for Cloud is selected" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 2.2.1", - "ControlDescription": "Ensure That Microsoft Defender for IoT Hub Is Set To 'On' " + "ControlDescription": "Ensure That Microsoft Defender for IoT Hub Is Set To 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.1", - "ControlDescription": "Ensure that 'Secure transfer required' is set to 'Enabled' " + "ControlDescription": "Ensure that 'Secure transfer required' is set to 'Enabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.2", - "ControlDescription": "Ensure that 'Enable Infrastructure Encryption' for Each Storage Account in Azure Storage is Set to 'enabled' " + "ControlDescription": "Ensure that 'Enable Infrastructure Encryption' for Each Storage Account in Azure Storage is Set to 'enabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.3", - "ControlDescription": "Ensure that 'Enable key rotation reminders' is enabled for each Storage Account " + "ControlDescription": "Ensure that 'Enable key rotation reminders' is enabled for each Storage Account" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.4", - "ControlDescription": "Ensure that Storage Account Access Keys are Periodically Regenerated " + "ControlDescription": "Ensure that Storage Account Access Keys are Periodically Regenerated" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.5", - "ControlDescription": "Ensure Storage Logging is Enabled for Queue Service for 'Read', 'Write', and 'Delete' requests " + "ControlDescription": "Ensure Storage Logging is Enabled for Queue Service for 'Read', 'Write', and 'Delete' requests" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.6", - "ControlDescription": "Ensure that Shared Access Signature Tokens Expire Within an Hour " + "ControlDescription": "Ensure that Shared Access Signature Tokens Expire Within an Hour" }, { "ControlTitle": "CIS Microsoft
Azure Foundations Benchmark V2.0.0 3.7", - "ControlDescription": "Ensure that 'Public access level' is disabled for storage accounts with blob containers " + "ControlDescription": "Ensure that 'Public access level' is disabled for storage accounts with blob containers" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.8", - "ControlDescription": "Ensure Default Network Access Rule for Storage Accounts is Set to Deny " + "ControlDescription": "Ensure Default Network Access Rule for Storage Accounts is Set to Deny" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.9", - "ControlDescription": "Ensure 'Allow Azure services on the trusted services list to access this storage account' is Enabled for Storage Account Access " + "ControlDescription": "Ensure 'Allow Azure services on the trusted services list to access this storage account' is Enabled for Storage Account Access" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.10", - "ControlDescription": "Ensure Private Endpoints are used to access Storage Accounts " + "ControlDescription": "Ensure Private Endpoints are used to access Storage Accounts" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.11", - "ControlDescription": "Ensure Soft Delete is Enabled for Azure Containers and Blob Storage " + "ControlDescription": "Ensure Soft Delete is Enabled for Azure Containers and Blob Storage" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.12", - "ControlDescription": "Ensure Storage for Critical Data are Encrypted with Customer Managed Keys " + "ControlDescription": "Ensure Storage for Critical Data are Encrypted with Customer Managed Keys" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.13", - "ControlDescription": "Ensure Storage logging is Enabled for Blob Service for 'Read', 'Write', and 'Delete' requests " + "ControlDescription": "Ensure Storage logging is Enabled for Blob Service for 'Read', 'Write', and 'Delete' requests" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.14", - "ControlDescription": "Ensure Storage Logging is Enabled for Table Service for 'Read', 'Write', and 'Delete' Requests " + "ControlDescription": "Ensure Storage Logging is Enabled for Table Service for 'Read', 'Write', and 'Delete' Requests" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 3.15", - "ControlDescription": "Ensure the \"Minimum TLS version\" for storage accounts is set to \"Version 1.2\" " + "ControlDescription": "Ensure the \"Minimum TLS version\" for storage accounts is set to \"Version 1.2\"" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.1", - "ControlDescription": "Ensure that 'Auditing' is set to 'On' " + "ControlDescription": "Ensure that 'Auditing' is set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.2", - "ControlDescription": "Ensure no Azure SQL Databases allow ingress from 0.0.0.0/0 (ANY IP) " + "ControlDescription": "Ensure no Azure SQL Databases allow ingress from 0.0.0.0/0 (ANY IP)" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.3", - "ControlDescription": "Ensure SQL server's Transparent Data Encryption (TDE) protector is encrypted with Customer-managed key " + "ControlDescription": "Ensure SQL server's Transparent Data Encryption (TDE) protector is encrypted with Customer-managed key" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.4",
- "ControlDescription": "Ensure that Azure Active Directory Admin is Configured for SQL Servers " + "ControlDescription": "Ensure that Azure Active Directory Admin is Configured for SQL Servers" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.5", - "ControlDescription": "Ensure that 'Data encryption' is set to 'On' on a SQL Database " + "ControlDescription": "Ensure that 'Data encryption' is set to 'On' on a SQL Database" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.1.6", - "ControlDescription": "Ensure that 'Auditing' Retention is 'greater than 90 days' " + "ControlDescription": "Ensure that 'Auditing' Retention is 'greater than 90 days'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.2.1", - "ControlDescription": "Ensure that Microsoft Defender for SQL is set to 'On' for critical SQL Servers " + "ControlDescription": "Ensure that Microsoft Defender for SQL is set to 'On' for critical SQL Servers" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.2.2", - "ControlDescription": "Ensure that Vulnerability Assessment (VA) is enabled on a SQL server by setting a Storage Account " + "ControlDescription": "Ensure that Vulnerability Assessment (VA) is enabled on a SQL server by setting a Storage Account" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.2.3", - "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Periodic recurring scans' is set to 'on' for each SQL server " + "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Periodic recurring scans' is set to 'on' for each SQL server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.2.4", - "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Send scan reports to' is configured for a SQL server " + "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Send scan reports to' is configured for a SQL server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.2.5", - "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners' is set for each SQL Server " + "ControlDescription": "Ensure that Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners' is set for each SQL Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.1", - "ControlDescription": "Ensure 'Enforce SSL connection' is set to 'ENABLED' for PostgreSQL Database Server " + "ControlDescription": "Ensure 'Enforce SSL connection' is set to 'ENABLED' for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.2", - "ControlDescription": "Ensure Server Parameter 'log_checkpoints' is set to 'ON' for PostgreSQL Database Server " + "ControlDescription": "Ensure Server Parameter 'log_checkpoints' is set to 'ON' for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.3", - "ControlDescription": "Ensure server parameter 'log_connections' is set to 'ON' for PostgreSQL Database Server " + "ControlDescription": "Ensure server parameter 'log_connections' is set to 'ON' for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.4", - "ControlDescription": "Ensure server parameter 'log_disconnections' is set to 'ON' for PostgreSQL Database Server " + 
"ControlDescription": "Ensure server parameter 'log_disconnections' is set to 'ON' for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.5", - "ControlDescription": "Ensure server parameter 'connection_throttling' is set to 'ON' for PostgreSQL Database Server " + "ControlDescription": "Ensure server parameter 'connection_throttling' is set to 'ON' for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.6", - "ControlDescription": "Ensure Server Parameter 'log_retention_days' is greater than 3 days for PostgreSQL Database Server " + "ControlDescription": "Ensure Server Parameter 'log_retention_days' is greater than 3 days for PostgreSQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.7", - "ControlDescription": "Ensure 'Allow access to Azure services' for PostgreSQL Database Server is disabled " + "ControlDescription": "Ensure 'Allow access to Azure services' for PostgreSQL Database Server is disabled" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.3.8", - "ControlDescription": "Ensure 'Infrastructure double encryption' for PostgreSQL Database Server is 'Enabled' " + "ControlDescription": "Ensure 'Infrastructure double encryption' for PostgreSQL Database Server is 'Enabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.4.1", - "ControlDescription": "Ensure 'Enforce SSL connection' is set to 'Enabled' for Standard MySQL Database Server " + "ControlDescription": "Ensure 'Enforce SSL connection' is set to 'Enabled' for Standard MySQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.4.2", - "ControlDescription": "Ensure 'TLS Version' is set to 'TLSV1.2' for MySQL flexible Database Server " + "ControlDescription": "Ensure 'TLS Version' is set to 'TLSV1.2' for MySQL flexible Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.4.3", - "ControlDescription": "Ensure server parameter 'audit_log_enabled' is set to 'ON' for MySQL Database Server " + "ControlDescription": "Ensure server parameter 'audit_log_enabled' is set to 'ON' for MySQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.4.4", - "ControlDescription": "Ensure server parameter 'audit_log_events' has 'CONNECTION' set for MySQL Database Server " + "ControlDescription": "Ensure server parameter 'audit_log_events' has 'CONNECTION' set for MySQL Database Server" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.5.1", - "ControlDescription": "Ensure That 'Firewalls & Networks' Is Limited to Use Selected Networks Instead of All Networks " + "ControlDescription": "Ensure That 'Firewalls & Networks' Is Limited to Use Selected Networks Instead of All Networks" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.5.2", - "ControlDescription": "Ensure That Private Endpoints Are Used Where Possible " + "ControlDescription": "Ensure That Private Endpoints Are Used Where Possible" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 4.5.3", - "ControlDescription": "Use Azure Active Directory (AAD) Client Authentication and Azure RBAC where possible. " + "ControlDescription": "Use Azure Active Directory (AAD) Client Authentication and Azure RBAC where possible." 
}, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.1", - "ControlDescription": "Ensure that a 'Diagnostic Setting' exists " + "ControlDescription": "Ensure that a 'Diagnostic Setting' exists" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.2", - "ControlDescription": "Ensure Diagnostic Setting captures appropriate categories " + "ControlDescription": "Ensure Diagnostic Setting captures appropriate categories" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.3", - "ControlDescription": "Ensure the Storage Container Storing the Activity Logs is not Publicly Accessible " + "ControlDescription": "Ensure the Storage Container Storing the Activity Logs is not Publicly Accessible" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.4", - "ControlDescription": "Ensure the storage account containing the container with activity logs is encrypted with Customer Managed Key " + "ControlDescription": "Ensure the storage account containing the container with activity logs is encrypted with Customer Managed Key" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.5", - "ControlDescription": "Ensure that logging for Azure Key Vault is 'Enabled' " + "ControlDescription": "Ensure that logging for Azure Key Vault is 'Enabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.6", - "ControlDescription": "Ensure that Network Security Group Flow logs are captured and sent to Log Analytics " + "ControlDescription": "Ensure that Network Security Group Flow logs are captured and sent to Log Analytics" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.1.7", - "ControlDescription": "Ensure that logging for Azure AppService 'HTTP logs' is enabled " + "ControlDescription": "Ensure that logging for Azure AppService 'HTTP logs' is enabled" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.1", - "ControlDescription": "Ensure that Activity Log Alert exists for Create Policy Assignment " + "ControlDescription": "Ensure that Activity Log Alert exists for Create Policy Assignment" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.2", - "ControlDescription": "Ensure that Activity Log Alert exists for Delete Policy Assignment " + "ControlDescription": "Ensure that Activity Log Alert exists for Delete Policy Assignment" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.3", - "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Network Security Group " + "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Network Security Group" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.4", - "ControlDescription": "Ensure that Activity Log Alert exists for Delete Network Security Group " + "ControlDescription": "Ensure that Activity Log Alert exists for Delete Network Security Group" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.5", - "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Security Solution " + "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Security Solution" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.6", - "ControlDescription": "Ensure that Activity Log Alert exists for Delete Security Solution " + "ControlDescription": "Ensure that Activity Log Alert exists for Delete 
Security Solution" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.7", - "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule " + "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.8", - "ControlDescription": "Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule " + "ControlDescription": "Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.9", - "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Public IP Address rule " + "ControlDescription": "Ensure that Activity Log Alert exists for Create or Update Public IP Address rule" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.2.10", - "ControlDescription": "Ensure that Activity Log Alert exists for Delete Public IP Address rule " + "ControlDescription": "Ensure that Activity Log Alert exists for Delete Public IP Address rule" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.3.1", - "ControlDescription": "Ensure Application Insights are Configured " + "ControlDescription": "Ensure Application Insights are Configured" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.4", - "ControlDescription": "Ensure that Azure Monitor Resource Logging is Enabled for All Services that Support it " + "ControlDescription": "Ensure that Azure Monitor Resource Logging is Enabled for All Services that Support it" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 5.5", - "ControlDescription": "Ensure that SKU Basic/Consumption is not used on artifacts that need to be monitored (Particularly for Production Workloads) " + "ControlDescription": "Ensure that SKU Basic/Consumption is not used on artifacts that need to be monitored (Particularly for Production Workloads)" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.1", - "ControlDescription": "Ensure that RDP access from the Internet is evaluated and restricted " + "ControlDescription": "Ensure that RDP access from the Internet is evaluated and restricted" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.2", - "ControlDescription": "Ensure that SSH access from the Internet is evaluated and restricted " + "ControlDescription": "Ensure that SSH access from the Internet is evaluated and restricted" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.3", - "ControlDescription": "Ensure that UDP access from the Internet is evaluated and restricted " + "ControlDescription": "Ensure that UDP access from the Internet is evaluated and restricted" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.4", - "ControlDescription": "Ensure that HTTP(S) access from the Internet is evaluated and restricted " + "ControlDescription": "Ensure that HTTP(S) access from the Internet is evaluated and restricted" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.5", - "ControlDescription": "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days' " + "ControlDescription": "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.6", - 
"ControlDescription": "Ensure that Network Watcher is 'Enabled' " + "ControlDescription": "Ensure that Network Watcher is 'Enabled'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 6.7", - "ControlDescription": "Ensure that Public IP addresses are Evaluated on a Periodic Basis " + "ControlDescription": "Ensure that Public IP addresses are Evaluated on a Periodic Basis" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.1", - "ControlDescription": "Ensure an Azure Bastion Host Exists " + "ControlDescription": "Ensure an Azure Bastion Host Exists" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.2", - "ControlDescription": "Ensure Virtual Machines are utilizing Managed Disks " + "ControlDescription": "Ensure Virtual Machines are utilizing Managed Disks" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.3", - "ControlDescription": "Ensure that 'OS and Data' disks are encrypted with Customer Managed Key (CMK) " + "ControlDescription": "Ensure that 'OS and Data' disks are encrypted with Customer Managed Key (CMK)" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.4", - "ControlDescription": "Ensure that 'Unattached disks' are encrypted with 'Customer Managed Key' (CMK) " + "ControlDescription": "Ensure that 'Unattached disks' are encrypted with 'Customer Managed Key' (CMK)" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.5", - "ControlDescription": "Ensure that Only Approved Extensions Are Installed " + "ControlDescription": "Ensure that Only Approved Extensions Are Installed" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.6", - "ControlDescription": "Ensure that Endpoint Protection for all Virtual Machines is installed " + "ControlDescription": "Ensure that Endpoint Protection for all Virtual Machines is installed" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 7.7", - "ControlDescription": "[Legacy] Ensure that VHDs are Encrypted " + "ControlDescription": "[Legacy] Ensure that VHDs are Encrypted" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.1", - "ControlDescription": "Ensure that the Expiration Date is set for all Keys in RBAC Key Vaults " + "ControlDescription": "Ensure that the Expiration Date is set for all Keys in RBAC Key Vaults" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.2", - "ControlDescription": "Ensure that the Expiration Date is set for all Keys in Non- RBAC Key Vaults. " + "ControlDescription": "Ensure that the Expiration Date is set for all Keys in Non- RBAC Key Vaults." 
}, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.3", - "ControlDescription": "Ensure that the Expiration Date is set for all Secrets in RBAC Key Vaults " + "ControlDescription": "Ensure that the Expiration Date is set for all Secrets in RBAC Key Vaults" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.4", - "ControlDescription": "Ensure that the Expiration Date is set for all Secrets in Non-RBAC Key Vaults " + "ControlDescription": "Ensure that the Expiration Date is set for all Secrets in Non-RBAC Key Vaults" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.5", - "ControlDescription": "Ensure the Key Vault is Recoverable " + "ControlDescription": "Ensure the Key Vault is Recoverable" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.6", - "ControlDescription": "Enable Role Based Access Control for Azure Key Vault " + "ControlDescription": "Enable Role Based Access Control for Azure Key Vault" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.7", - "ControlDescription": "Ensure that Private Endpoints are Used for Azure Key Vault " + "ControlDescription": "Ensure that Private Endpoints are Used for Azure Key Vault" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 8.8", - "ControlDescription": "Ensure Automatic Key Rotation is Enabled Within Azure Key Vault for the Supported Services " + "ControlDescription": "Ensure Automatic Key Rotation is Enabled Within Azure Key Vault for the Supported Services" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.1", - "ControlDescription": "Ensure App Service Authentication is set up for apps in Azure App Service " + "ControlDescription": "Ensure App Service Authentication is set up for apps in Azure App Service" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.2", - "ControlDescription": "Ensure Web App Redirects All HTTP traffic to HTTPS in Azure App Service " + "ControlDescription": "Ensure Web App Redirects All HTTP traffic to HTTPS in Azure App Service" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.3", - "ControlDescription": "Ensure Web App is using the latest version of TLS encryption " + "ControlDescription": "Ensure Web App is using the latest version of TLS encryption" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.4", - "ControlDescription": "Ensure the web app has 'Client Certificates (Incoming client certificates)' set to 'On' " + "ControlDescription": "Ensure the web app has 'Client Certificates (Incoming client certificates)' set to 'On'" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.5", - "ControlDescription": "Ensure that Register with Azure Active Directory is enabled on App Service " + "ControlDescription": "Ensure that Register with Azure Active Directory is enabled on App Service" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.6", - "ControlDescription": "Ensure That 'PHP version' is the Latest, If Used to Run the Web App " + "ControlDescription": "Ensure That 'PHP version' is the Latest, If Used to Run the Web App" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.7", - "ControlDescription": "Ensure that 'Python version' is the Latest Stable Version, if Used to Run the Web App " + "ControlDescription": "Ensure that 'Python version' is the Latest Stable Version, if Used to Run the Web App" }, { "ControlTitle": "CIS 
Microsoft Azure Foundations Benchmark V2.0.0 9.8", - "ControlDescription": "Ensure that 'Java version' is the latest, if used to run the Web App " + "ControlDescription": "Ensure that 'Java version' is the latest, if used to run the Web App" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.9", - "ControlDescription": "Ensure that 'HTTP Version' is the Latest, if Used to Run the Web App " + "ControlDescription": "Ensure that 'HTTP Version' is the Latest, if Used to Run the Web App" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.10", - "ControlDescription": "Ensure FTP deployments are Disabled " + "ControlDescription": "Ensure FTP deployments are Disabled" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 9.11", - "ControlDescription": "Ensure Azure Key Vaults are Used to Store Secrets " + "ControlDescription": "Ensure Azure Key Vaults are Used to Store Secrets" }, { "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.1", + "ControlDescription": "Ensure single sign-on (SSO) is configured for your account / organization" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.2", + "ControlDescription": "Ensure Snowflake SCIM integration is configured to automatically provision and deprovision users and groups (i.e. 
roles)" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.4", + "ControlDescription": "Ensure multi-factor authentication (MFA) is turned on for all human users with password-based authentication" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.5", + "ControlDescription": "Ensure minimum password length is set to 14 characters or more" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.6", + "ControlDescription": "Ensure that service accounts use key pair authentication" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.7", + "ControlDescription": "Ensure authentication key pairs are rotated every 180 days" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.8", + "ControlDescription": "Ensure that users who did not log in for 90 days are disabled" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.9", + "ControlDescription": "Ensure that the idle session timeout is set to 15 minutes or less for users with the ACCOUNTADMIN and SECURITYADMIN roles" }, { - "ControlTitle": "CIS Microsoft Azure Foundations Benchmark V2.0.0 10.1", - "ControlDescription": "Ensure that Resource Locks are set for Mission-Critical Azure Resources " + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.10", + "ControlDescription": "Limit the number of users with ACCOUNTADMIN and SECURITYADMIN" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.11", + "ControlDescription": "Ensure that all users granted the ACCOUNTADMIN role have an email address assigned" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.12", + "ControlDescription": "Ensure that no users have ACCOUNTADMIN or SECURITYADMIN as the default role" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.13", + "ControlDescription": "Ensure that the ACCOUNTADMIN or SECURITYADMIN role is not granted to any custom role" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.14", + "ControlDescription": "Ensure that Snowflake tasks are not owned by the ACCOUNTADMIN or SECURITYADMIN roles" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.15", + "ControlDescription": "Ensure that Snowflake tasks do not run with the ACCOUNTADMIN or SECURITYADMIN role privileges" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.16", + "ControlDescription": "Ensure that Snowflake stored procedures are not owned 
by the ACCOUNTADMIN or SECURITYADMIN roles" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 1.17", + "ControlDescription": "Ensure Snowflake stored procedures do not run with ACCOUNTADMIN or SECURITYADMIN role privileges" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.1", + "ControlDescription": "Ensure monitoring and alerting exist for ACCOUNTADMIN and SECURITYADMIN role grants" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.2", + "ControlDescription": "Ensure monitoring and alerting exist for MANAGE GRANTS privilege grants" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.3", + "ControlDescription": "Ensure monitoring and alerting exist for password sign-ins of SSO users" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.4", + "ControlDescription": "Ensure monitoring and alerting exist for password sign-in without MFA" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.5", + "ControlDescription": "Ensure monitoring and alerting exist for creation, update and deletion of security integrations" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.6", + "ControlDescription": "Ensure monitoring and alerting exist for changes to network policies and associated objects" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 2.7", + "ControlDescription": "Ensure monitoring and alerting exist for SCIM token creation" + }, + { + "ControlTitle": "CIS Snowflake Foundations Benchmark V1.0.0 3.1", + "ControlDescription": "Ensure that an account-level network policy has been configured to only allow access from trusted IP addresses" } ] \ No newline at end of file diff --git a/eeauditor/processor/outputs/firemon_cloud_defense_output.py b/eeauditor/processor/outputs/firemon_cloud_defense_output.py deleted file mode 100644 index 4b0bf5c9..00000000 --- a/eeauditor/processor/outputs/firemon_cloud_defense_output.py +++ /dev/null @@ -1,168 +0,0 @@ -#This file is part of ElectricEye. -#SPDX-License-Identifier: Apache-2.0 - -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at - -#http://www.apache.org/licenses/LICENSE-2.0 - -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
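The control catalog edited above is a flat JSON array in which each entry carries only a ControlTitle and a ControlDescription. That shape makes defects like the trailing whitespace stripped throughout this change, or accidentally duplicated control numbers, easy to catch mechanically before they ship. A minimal sanity-check sketch follows; it is not part of ElectricEye, and the file path is a placeholder for wherever the catalog lives in the repo:

import json
from collections import Counter

# Placeholder path: point this at the control catalog JSON shown in the hunks above
with open("control_catalog.json") as f:
    catalog = json.load(f)

# Report control numbers that appear more than once (e.g. two entries both titled "... 3.1")
for title, count in Counter(entry["ControlTitle"] for entry in catalog).items():
    if count > 1:
        print(f"Duplicate ControlTitle ({count}x): {title}")

# Report descriptions still padded with leading or trailing whitespace
for entry in catalog:
    if entry["ControlDescription"] != entry["ControlDescription"].strip():
        print(f"Whitespace padding in: {entry['ControlTitle']}")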
- -import tomli -import boto3 -import sys -import json -import os -import requests -from time import sleep -from botocore.exceptions import ClientError -from processor.outputs.output_base import ElectricEyeOutput - -# Boto3 Clients -ssm = boto3.client("ssm") -asm = boto3.client("secretsmanager") - -# These Constants define legitimate values for certain parameters within the external_providers.toml file -CREDENTIALS_LOCATION_CHOICES = ["AWS_SSM", "AWS_SECRETS_MANAGER", "CONFIG_FILE"] - -@ElectricEyeOutput -class FiremonCloudDefenseProvider(object): - __provider__ = "firemon_cloud_defense" - - def __init__(self): - print("Preparing Firemon Cloud Defense (DisruptOps) credentials.") - - if os.environ["TOML_FILE_PATH"] == "None": - # Get the absolute path of the current directory - currentDir = os.path.abspath(os.path.dirname(__file__)) - # Go two directories back to /eeauditor/ - twoBack = os.path.abspath(os.path.join(currentDir, "../../")) - # TOML is located in /eeauditor/ directory - tomlFile = f"{twoBack}/external_providers.toml" - else: - tomlFile = os.environ["TOML_FILE_PATH"] - - with open(tomlFile, "rb") as f: - data = tomli.load(f) - - # Parse from [global] to determine credential location of PostgreSQL Password - if data["global"]["credentials_location"] not in CREDENTIALS_LOCATION_CHOICES: - print(f"Invalid option for [global.credentials_location]. Must be one of {str(CREDENTIALS_LOCATION_CHOICES)}.") - sys.exit(2) - self.credentials_location = data["global"]["credentials_location"] - - # Variable for the entire [outputs.firemon_cloud_defense] section - fcdDetails = data["outputs"]["firemon_cloud_defense"] - - # Parse Client ID - if self.credentials_location == "CONFIG_FILE": - clientId = fcdDetails["firemon_cloud_defense_client_id_value"] - elif self.credentials_location == "AWS_SSM": - clientId = self.get_credential_from_aws_ssm( - fcdDetails["firemon_cloud_defense_client_id_value"], - "firemon_cloud_defense_client_id_value" - ) - elif self.credentials_location == "AWS_SECRETS_MANAGER": - clientId = self.get_credential_from_aws_secrets_manager( - fcdDetails["firemon_cloud_defense_client_id_value"], - "firemon_cloud_defense_client_id_value" - ) - # Parse API Key - if self.credentials_location == "CONFIG_FILE": - apiKey = fcdDetails["firemon_cloud_defense_api_key_value"] - elif self.credentials_location == "AWS_SSM": - apiKey = self.get_credential_from_aws_ssm( - fcdDetails["firemon_cloud_defense_api_key_value"], - "firemon_cloud_defense_api_key_value" - ) - elif self.credentials_location == "AWS_SECRETS_MANAGER": - apiKey = self.get_credential_from_aws_secrets_manager( - fcdDetails["firemon_cloud_defense_api_key_value"], - "firemon_cloud_defense_api_key_value" - ) - - # Ensure that values are provided for all variable - use all() and a list comprehension to check the vars - # empty strings will trigger `if not` - if not all(s for s in [clientId, apiKey]): - print("An empty value was detected in '[outputs.firemon_cloud_defense]'. 
Review the TOML file and try again!") - sys.exit(2) - - self.url = "https://collector.prod.disruptops.com/event" - self.clientId = clientId - self.apiKey = apiKey - - def write_findings(self, findings: list, **kwargs): - if len(findings) == 0: - print("There are not any findings to write!") - exit(0) - # Use another list comprehension to remove `ProductFields.AssetDetails` from non-Asset reporting outputs - noDetails = [ - {**d, "ProductFields": {k: v for k, v in d["ProductFields"].items() if k != "AssetDetails"}} for d in findings - ] - del findings - - print(f"Writing {len(noDetails)} results to Firemon Cloud Defense (DisruptOps).") - - for finding in noDetails: - r = requests.post( - self.url, - data=json.dumps(finding), - auth=(self.clientId, self.apiKey) - ) - if r.status_code == 429: - sleep(0.5) - elif r.status_code == (400, 401, 403, 404): - raise r.json() - - def get_credential_from_aws_ssm(self, value, configurationName): - """ - Retrieves a TOML variable from AWS Systems Manager Parameter Store and returns it - """ - - # Check that a value was provided - if value == (None or ""): - print(f"A value for {configurationName} was not provided. Fix the TOML file and run ElectricEye again.") - sys.exit(2) - - # Retrieve the credential from SSM Parameter Store - try: - credential = ssm.get_parameter( - Name=value, - WithDecryption=True - )["Parameter"]["Value"] - except ClientError as e: - raise e - - return credential - - def get_credential_from_aws_secrets_manager(self, value, configurationName): - """ - Retrieves a TOML variable from AWS Secrets Manager and returns it - """ - - # Check that a value was provided - if value == (None or ""): - print(f"A value for {configurationName} was not provided. Fix the TOML file and run ElectricEye again.") - sys.exit(2) - - # Retrieve the credential from AWS Secrets Manager - try: - credential = asm.get_secret_value( - SecretId=value, - )["SecretString"] - except ClientError as e: - raise e - - return credential - - # EOF \ No newline at end of file diff --git a/eeauditor/processor/outputs/iconography.yaml b/eeauditor/processor/outputs/iconography.yaml index 351b029c..4f36f8d8 100644 --- a/eeauditor/processor/outputs/iconography.yaml +++ b/eeauditor/processor/outputs/iconography.yaml @@ -282,4 +282,13 @@ - AssetService: Microsoft Defender for Cloud ImageTag: - AssetService: Azure Application Insights - ImageTag: \ No newline at end of file + ImageTag: +############# +# SNOWFLAKE # +############# +- AssetService: Snowflake Users + ImageTag: +- AssetService: Snowflake Account + ImageTag: +- AssetService: Snowflake Password Policy + ImageTag: \ No newline at end of file diff --git a/eeauditor/processor/outputs/ocsf_stdout.py b/eeauditor/processor/outputs/ocsf_stdout.py index 65ad6e50..d19fa875 100644 --- a/eeauditor/processor/outputs/ocsf_stdout.py +++ b/eeauditor/processor/outputs/ocsf_stdout.py @@ -58,10 +58,11 @@ "CIS Amazon Web Services Foundations Benchmark V3.0", "MITRE ATT&CK", "CIS AWS Database Services Benchmark V1.0", - "CIS Microsoft Azure Foundations Benchmark V2.0.0" + "CIS Microsoft Azure Foundations Benchmark V2.0.0", + "CIS Snowflake Foundations Benchmark V1.0.0" ] -class AsffOcsfNormalizedMapping(NamedTuple): +class SeverityAccountTypeComplianceMapping(NamedTuple): severityId: int severity: str cloudAccountTypeId: int @@ -69,6 +70,14 @@ class AsffOcsfNormalizedMapping(NamedTuple): complianceStatusId: int complianceStatus: str +class ActivityStatusTypeMapping(NamedTuple): + activityId: int + activityName: str + statusId: int + 
status: str + typeUid: int + typeName: str + here = path.abspath(path.dirname(__file__)) with open(f"{here}/mapped_compliance_controls.json") as jsonfile: CONTROLS_CROSSWALK = json.load(jsonfile) @@ -146,7 +155,7 @@ def nist_csf_v_1_1_controls_crosswalk(self, nistCsfSubcategory): except KeyError: return [] - def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> AsffOcsfNormalizedMapping: + def compliance_finding_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> SeverityAccountTypeComplianceMapping: """ Normalizes the following ASFF Severity, Cloud Account Provider, and Compliance values into OCSF """ @@ -196,13 +205,13 @@ def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, com complianceStatusId = 99 complianceStatus = complianceStatusLabel.lower().capitalize() - return AsffOcsfNormalizedMapping ( - severityId, - severity, - acctTypeId, - acctType, - complianceStatusId, - complianceStatus + return SeverityAccountTypeComplianceMapping( + severityId=severityId, + severity=severity, + cloudAccountTypeId=acctTypeId, + cloudAccountType=acctType, + complianceStatusId=complianceStatusId, + complianceStatus=complianceStatus ) def iso8061_to_epochseconds(self, iso8061: str) -> int: @@ -210,7 +219,31 @@ def iso8061_to_epochseconds(self, iso8061: str) -> int: Converts ISO 8061 datetime into Epochseconds timestamp """ return int(datetime.fromisoformat(iso8061).timestamp()) + + def record_state_to_status(self, recordState: str) -> ActivityStatusTypeMapping: + """ + Maps ElectricEye RecordState to OCSF Status + """ + if recordState == "ACTIVE": + return ActivityStatusTypeMapping( + activityId=1, + activityName="Create", + statusId=1, + status="New", + typeUid=200301, + typeName="Compliance Finding: Create" + ) + if recordState == "ARCHIVED": + return ActivityStatusTypeMapping( + activityId=3, + activityName="Close", + statusId=4, + status="Resolved", + typeUid=200303, + typeName="Compliance Finding: Close" + ) + def ocsf_compliance_finding_mapping(self, findings: list) -> list: """ Takes ElectricEye ASFF and outputs to OCSF v1.1.0 Compliance Finding (2003), returns a list of new findings @@ -221,6 +254,9 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: logger.info("Mapping ASFF to OCSF") for finding in findings: + # Generate metadata.processed_time + timeNow = datetime.now().isoformat() + processedTime = self.iso8061_to_epochseconds(timeNow) # check if the compliance.requirements start with the control frameworks and append the unique ones into a list for compliance.standards standard = [] @@ -230,55 +266,76 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: requirements = finding["Compliance"]["RelatedRequirements"] for control in requirements: for framework in SUPPORTED_FRAMEWORKS: if str(control).startswith(framework) and framework not in standard: standard.append(framework) - asffToOcsf = self.asff_to_ocsf_normalization( + asffToOcsf = self.compliance_finding_ocsf_normalization( severityLabel=finding["Severity"]["Label"], cloudProvider=finding["ProductFields"]["Provider"], complianceStatusLabel=finding["Compliance"]["Status"] ) - if finding["ProductFields"]["Provider"] == "AWS": - partition = finding["Resources"][0]["Partition"] - else: + # Non-AWS checks have hardcoded "dummy" data for Account, Region, and Partition - set these to none + provider = finding["ProductFields"]["Provider"] + partition = finding["Resources"][0]["Partition"] + region = finding["ProductFields"]["AssetRegion"] + accountId = finding["ProductFields"]["ProviderAccountId"] + + if provider != 
"AWS" or partition == "not-aws": partition = None + + if region == "us-placeholder-1": region = None + + if region == "aws-global": region = "us-east-1" + + if accountId == "000000000000": accountId = None + + eventTime = self.iso8061_to_epochseconds(finding["CreatedAt"]) + + recordState = finding["RecordState"] + recordStateMapping = self.record_state_to_status(recordState) ocsf = { # Base Event data - "activity_id": 1, - "activity_name": "Create", + "activity_id": recordStateMapping.activityId, + "activity_name": recordStateMapping.activityName, "category_name": "Findings", "category_uid": 2, "class_name": "Compliance Finding", "class_uid": 2003, "confidence_score": finding["Confidence"], - "severity": asffToOcsf[1], - "severity_id": asffToOcsf[0], - "status": "New", - "status_id": 1, - "time": self.iso8061_to_epochseconds(finding["CreatedAt"]), - "type_name": "Compliance Finding: Create", - "type_uid": 200301, + "severity": asffToOcsf.severity, + "severity_id": asffToOcsf.severityId, + "status": recordStateMapping.status, + "status_id": recordStateMapping.statusId, + "start_time": eventTime, + "time": eventTime, + "type_name": recordStateMapping.typeName, + "type_uid": recordStateMapping.typeUid, # Profiles / Metadata "metadata": { "uid": finding["Id"], "correlation_uid": finding["GeneratorId"], - "version":"1.1.0", + "log_provider": "ElectricEye", + "logged_time": eventTime, + "original_time": finding["CreatedAt"], + "processed_time": processedTime, + "version":"1.4.0", + "profiles":["cloud"], "product": { "name":"ElectricEye", "version":"3.0", "url_string":"https://github.com/jonrau1/ElectricEye", "vendor_name":"ElectricEye" }, - "profiles":[ - "cloud" - ] }, "cloud": { "provider": finding["ProductFields"]["Provider"], - "region": finding["ProductFields"]["AssetRegion"], + "region": region, "account": { - "uid": finding["ProductFields"]["ProviderAccountId"], - "type": asffToOcsf[3], - "type_uid": asffToOcsf[2] + "uid": accountId, + "type": asffToOcsf.cloudAccountType, + "type_uid": asffToOcsf.cloudAccountTypeId } }, # Observables @@ -286,9 +343,9 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: # Cloud Account (Project) UID { "name": "cloud.account.uid", - "type": "Resource UID", - "type_id": 10, - "value": finding["ProductFields"]["ProviderAccountId"] + "type": "Account UID", + "type_id": 35, + "value": accountId }, # Resource UID { @@ -307,7 +364,7 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: "status_id": asffToOcsf[4] }, "finding_info": { - "created_time": self.iso8061_to_epochseconds(finding["CreatedAt"]), + "created_time": eventTime, "desc": finding["Description"], "first_seen_time": self.iso8061_to_epochseconds(finding["FirstObservedAt"]), "modified_time": self.iso8061_to_epochseconds(finding["UpdatedAt"]), @@ -320,13 +377,15 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: "desc": finding["Remediation"]["Recommendation"]["Text"], "references": [finding["Remediation"]["Recommendation"]["Url"]] }, - "resource": { - "data": finding["ProductFields"]["AssetDetails"], - "cloud_partition": partition, - "region": finding["ProductFields"]["AssetRegion"], - "type": finding["ProductFields"]["AssetService"], - "uid": finding["Resources"][0]["Id"] - }, + "resources": [ + { + "data": finding["ProductFields"]["AssetDetails"], + "cloud_partition": partition, + "region": region, + "type": finding["ProductFields"]["AssetService"], + "uid": finding["Resources"][0]["Id"] + } + ], "unmapped": { "provider_type": 
finding["ProductFields"]["ProviderType"], "asset_class": finding["ProductFields"]["AssetClass"], diff --git a/eeauditor/processor/outputs/ocsf_to_firehose_output.py b/eeauditor/processor/outputs/ocsf_to_firehose_output.py index 2c13c5e7..cb1ca2e7 100644 --- a/eeauditor/processor/outputs/ocsf_to_firehose_output.py +++ b/eeauditor/processor/outputs/ocsf_to_firehose_output.py @@ -61,10 +61,11 @@ "CIS Amazon Web Services Foundations Benchmark V3.0", "MITRE ATT&CK", "CIS AWS Database Services Benchmark V1.0", - "CIS Microsoft Azure Foundations Benchmark V2.0.0" + "CIS Microsoft Azure Foundations Benchmark V2.0.0", + "CIS Snowflake Foundations Benchmark V1.0.0" ] -class AsffOcsfNormalizedMapping(NamedTuple): +class SeverityAccountTypeComplianceMapping(NamedTuple): severityId: int severity: str cloudAccountTypeId: int @@ -72,6 +73,14 @@ class AsffOcsfNormalizedMapping(NamedTuple): complianceStatusId: int complianceStatus: str +class ActivityStatusTypeMapping(NamedTuple): + activityId: int + activityName: str + statusId: int + status: str + typeUid: int + typeName: str + here = path.abspath(path.dirname(__file__)) with open(f"{here}/mapped_compliance_controls.json") as jsonfile: CONTROLS_CROSSWALK = json.load(jsonfile) @@ -81,7 +90,7 @@ class OcsfFirehoseOutput(object): __provider__ = "ocsf_kdf" def __init__(self): - print("Preparing to send OCSF V1.1.0 Compliance Findings to Amazon Kinesis Data Firehose.") + print("Preparing to send OCSF V1.4.0 Compliance Findings to Amazon Kinesis Data Firehose.") if environ["TOML_FILE_PATH"] == "None": # Get the absolute path of the current directory @@ -214,7 +223,7 @@ def nist_csf_v_1_1_controls_crosswalk(self, nistCsfSubcategory): except KeyError: return [] - def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> AsffOcsfNormalizedMapping: + def compliance_finding_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> SeverityAccountTypeComplianceMapping: """ Normalizes the following ASFF Severity, Cloud Account Provider, and Compliance values into OCSF """ @@ -244,8 +253,26 @@ def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, com acctTypeId = 10 acctType = "AWS Account" elif cloudProvider == "GCP": - acctTypeId = 5 - acctType = "GCP Account" + acctTypeId = 11 + acctType = "GCP Project" + elif cloudProvider == "OCI": + acctTypeId = 12 + acctType = "OCI Compartment" + elif cloudProvider == "Azure": + acctTypeId = 13 + acctType = "Azure Subscription" + elif cloudProvider == "Salesforce": + acctTypeId = 14 + acctType = "Salesforce Account" + elif cloudProvider == "Google Workspace": + acctTypeId = 15 + acctType = "Google Workspace" + elif cloudProvider == "ServiceNow": + acctTypeId = 16 + acctType = "ServiceNow Instance" + elif cloudProvider == "M365": + acctTypeId = 17 + acctType = "M365 Tenant" else: acctTypeId = 99 acctType = cloudProvider @@ -264,13 +291,13 @@ def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, com complianceStatusId = 99 complianceStatus = complianceStatusLabel.lower().capitalize() - return ( - severityId, - severity, - acctTypeId, - acctType, - complianceStatusId, - complianceStatus + return SeverityAccountTypeComplianceMapping( + severityId=severityId, + severity=severity, + cloudAccountTypeId=acctTypeId, + cloudAccountType=acctType, + complianceStatusId=complianceStatusId, + complianceStatus=complianceStatus ) def iso8061_to_epochseconds(self, iso8061: str) -> int: @@ -278,7 +305,31 @@ def 
iso8061_to_epochseconds(self, iso8061: str) -> int: Converts ISO 8061 datetime into Epochseconds timestamp """ return int(datetime.fromisoformat(iso8061).timestamp()) + + def record_state_to_status(self, recordState: str) -> ActivityStatusTypeMapping: + """ + Maps ElectricEye RecordState to OCSF Status + """ + if recordState == "ACTIVE": + return ActivityStatusTypeMapping( + activityId=1, + activityName="Create", + statusId=1, + status="New", + typeUid=200301, + typeName="Compliance Finding: Create" + ) + if recordState == "ARCHIVED": + return ActivityStatusTypeMapping( + activityId=3, + activityName="Close", + statusId=4, + status="Resolved", + typeUid=200303, + typeName="Compliance Finding: Close" + ) + def ocsf_compliance_finding_mapping(self, findings: list) -> list: """ Takes ElectricEye ASFF and outputs to OCSF v1.1.0 Compliance Finding (2003), returns a list of new findings @@ -289,62 +340,98 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: logger.info("Mapping ASFF to OCSF") for finding in findings: - - asffToOcsf = self.asff_to_ocsf_normalization( + # Generate metadata.processed_time + timeNow = datetime.now().isoformat() + processedTime = self.iso8061_to_epochseconds(timeNow) + + # check if the compliance.requirements start with the control frameworks and append the unique ones into a list for compliance.standards + standard = [] + requirements = finding["Compliance"]["RelatedRequirements"] + for control in requirements: + for framework in SUPPORTED_FRAMEWORKS: + if str(control).startswith(framework) and framework not in standard: + standard.append(framework) + + asffToOcsf = self.compliance_finding_ocsf_normalization( severityLabel=finding["Severity"]["Label"], cloudProvider=finding["ProductFields"]["Provider"], complianceStatusLabel=finding["Compliance"]["Status"] ) + + # Non-AWS checks have hardcoded "dummy" data for Account, Region, and Partition - set these to none + provider = finding["ProductFields"]["Provider"] + partition = finding["Resources"][0]["Partition"] + region = finding["ProductFields"]["AssetRegion"] + accountId = finding["ProductFields"]["ProviderAccountId"] + + if provider != "AWS" or partition == "not-aws": + partition = None + + if region == "us-placeholder-1": + region = None + + if region == "aws-global": + region = "us-east-1" + + if accountId == "000000000000": + accountId = None + + eventTime = self.iso8061_to_epochseconds(finding["CreatedAt"]) + + recordState = finding["RecordState"] + recordStateMapping = self.record_state_to_status(recordState) ocsf = { # Base Event data - "activity_id": 1, - "activity_name": "Create", + "activity_id": recordStateMapping.activityId, + "activity_name": recordStateMapping.activityName, "category_name": "Findings", "category_uid": 2, "class_name": "Compliance Finding", "class_uid": 2003, "confidence_score": finding["Confidence"], - "severity": asffToOcsf[1], - "severity_id": asffToOcsf[0], - "status": "New", - "status_id": 1, - "time": self.iso8061_to_epochseconds(finding["CreatedAt"]), - "type_name": "Compliance Finding: Create", - "type_uid": 200301, + "severity": asffToOcsf.severity, + "severity_id": asffToOcsf.severityId, + "status": recordStateMapping.status, + "status_id": recordStateMapping.statusId, + "start_time": eventTime, + "time": eventTime, + "type_name": recordStateMapping.typeName, + "type_uid": recordStateMapping.typeUid, # Profiles / Metadata "metadata": { "uid": finding["Id"], "correlation_uid": finding["GeneratorId"], - "version":"1.1.0", + "log_provider": "ElectricEye", + 
"logged_time": eventTime, + "original_time": finding["CreatedAt"], + "processed_time": processedTime, + "version":"1.4.0", + "profiles":["cloud"], "product": { "name":"ElectricEye", "version":"3.0", "url_string":"https://github.com/jonrau1/ElectricEye", "vendor_name":"ElectricEye" }, - "profiles":[ - "cloud" - ] }, "cloud": { "provider": finding["ProductFields"]["Provider"], - "project_uid": finding["ProductFields"]["ProviderAccountId"], - "region": finding["ProductFields"]["AssetRegion"], + "region": region, "account": { - "uid": finding["ProductFields"]["ProviderAccountId"], - "type": asffToOcsf[3], - "type_uid": asffToOcsf[2] + "uid": accountId, + "type": asffToOcsf.cloudAccountType, + "type_uid": asffToOcsf.cloudAccountTypeId } }, # Observables "observables": [ # Cloud Account (Project) UID { - "name": "cloud.project_uid", - "type": "Resource UID", - "type_id": 10, - "value": finding["ProductFields"]["ProviderAccountId"] + "name": "cloud.account.uid", + "type": "Account UID", + "type_id": 35, + "value": accountId }, # Resource UID { @@ -356,14 +443,14 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: ], # Compliance Finding Class Info "compliance": { - "requirements": finding["Compliance"]["RelatedRequirements"], + "requirements": sorted(requirements), "control": str(finding["Title"]).split("] ")[0].replace("[",""), - "standards": SUPPORTED_FRAMEWORKS, + "standards": sorted(standard), "status": asffToOcsf[5], "status_id": asffToOcsf[4] }, "finding_info": { - "created_time": self.iso8061_to_epochseconds(finding["CreatedAt"]), + "created_time": eventTime, "desc": finding["Description"], "first_seen_time": self.iso8061_to_epochseconds(finding["FirstObservedAt"]), "modified_time": self.iso8061_to_epochseconds(finding["UpdatedAt"]), @@ -376,17 +463,18 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: "desc": finding["Remediation"]["Recommendation"]["Text"], "references": [finding["Remediation"]["Recommendation"]["Url"]] }, - "resource": { - "data": finding["ProductFields"]["AssetDetails"], - "cloud_partition": finding["Resources"][0]["Partition"], - "region": finding["ProductFields"]["AssetRegion"], - "type": finding["ProductFields"]["AssetService"], - "uid": finding["Resources"][0]["Id"] - }, + "resources": [ + { + "data": finding["ProductFields"]["AssetDetails"], + "cloud_partition": partition, + "region": region, + "type": finding["ProductFields"]["AssetService"], + "uid": finding["Resources"][0]["Id"] + } + ], "unmapped": { - "provide_type": finding["ProductFields"]["ProviderType"], + "provider_type": finding["ProductFields"]["ProviderType"], "asset_class": finding["ProductFields"]["AssetClass"], - "asset_service": finding["ProductFields"]["AssetService"], "asset_component": finding["ProductFields"]["AssetComponent"], "workflow_status": finding["Workflow"]["Status"], "record_state": finding["RecordState"] diff --git a/eeauditor/processor/outputs/ocsf_v1_1_0_output.py b/eeauditor/processor/outputs/ocsf_v1_1_0_output.py index 356b2785..70ac0b48 100644 --- a/eeauditor/processor/outputs/ocsf_v1_1_0_output.py +++ b/eeauditor/processor/outputs/ocsf_v1_1_0_output.py @@ -58,10 +58,11 @@ "CIS Amazon Web Services Foundations Benchmark V3.0", "MITRE ATT&CK", "CIS AWS Database Services Benchmark V1.0", - "CIS Microsoft Azure Foundations Benchmark V2.0.0" + "CIS Microsoft Azure Foundations Benchmark V2.0.0", + "CIS Snowflake Foundations Benchmark V1.0.0" ] -class AsffOcsfNormalizedMapping(NamedTuple): +class SeverityAccountTypeComplianceMapping(NamedTuple): 
severityId: int severity: str cloudAccountTypeId: int @@ -69,6 +70,14 @@ class AsffOcsfNormalizedMapping(NamedTuple): complianceStatusId: int complianceStatus: str +class ActivityStatusTypeMapping(NamedTuple): + activityId: int + activityName: str + statusId: int + status: str + typeUid: int + typeName: str + here = path.abspath(path.dirname(__file__)) with open(f"{here}/mapped_compliance_controls.json") as jsonfile: CONTROLS_CROSSWALK = json.load(jsonfile) @@ -149,7 +158,7 @@ def nist_csf_v_1_1_controls_crosswalk(self, nistCsfSubcategory): except KeyError: return [] - def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> AsffOcsfNormalizedMapping: + def asff_to_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> SeverityAccountTypeComplianceMapping: """ Normalizes the following ASFF Severity, Cloud Account Provider, and Compliance values into OCSF """ @@ -213,7 +222,31 @@ def iso8061_to_epochseconds(self, iso8061: str) -> int: Converts ISO 8061 datetime into Epochseconds timestamp """ return int(datetime.fromisoformat(iso8061).timestamp()) + + def record_state_to_status(self, recordState: str) -> ActivityStatusTypeMapping: + """ + Maps ElectricEye RecordState to OCSF Status + """ + if recordState == "ACTIVE": + return ActivityStatusTypeMapping( + activityId=1, + activityName="Create", + statusId=1, + status="New", + typeUid=200301, + typeName="Compliance Finding: Create" + ) + if recordState == "ARCHIVED": + return ActivityStatusTypeMapping( + activityId=3, + activityName="Close", + statusId=4, + status="Resolved", + typeUid=200303, + typeName="Compliance Finding: Close" + ) + def ocsf_compliance_finding_mapping(self, findings: list) -> list: """ Takes ElectricEye ASFF and outputs to OCSF v1.1.0 Compliance Finding (2003), returns a list of new findings @@ -239,40 +272,46 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list: complianceStatusLabel=finding["Compliance"]["Status"] ) + # Non-AWS checks have hardcoded "dummy" data for Account, Region, and Partition - set these to none + provider = finding["ProductFields"]["Provider"] partition = finding["Resources"][0]["Partition"] region = finding["ProductFields"]["AssetRegion"] accountId = finding["ProductFields"]["ProviderAccountId"] - if partition != "AWS" or partition == "not-aws": + if provider != "AWS" or partition == "not-aws": partition = None - if partition == "AWS" and region == "us-placeholder-1": + if region == "us-placeholder-1": region = None - if partition == "AWS" and accountId == "000000000000": + if region == "aws-global": + region = "us-east-1" + + if accountId == "000000000000": accountId = None - # Non-AWS checks have hardcoded "dummy" data for Account, Region, and Partition - set these to none depending on the dummy data - #region = "us-placeholder-1" - #account = "000000000000" - #partition = "not-aws" + eventTime = self.iso8061_to_epochseconds(finding["CreatedAt"]) + + recordState = finding["RecordState"] + recordStateMapping = self.record_state_to_status(recordState) ocsf = { # Base Event data - "activity_id": 1, - "activity_name": "Create", + "activity_id": recordStateMapping.activityId, + "activity_name": recordStateMapping.activityName, "category_name": "Findings", "category_uid": 2, "class_name": "Compliance Finding", "class_uid": 2003, "confidence_score": finding["Confidence"], - "severity": asffToOcsf[1], - "severity_id": asffToOcsf[0], - "status": "New", - "status_id": 1, - "time": 
self.iso8061_to_epochseconds(finding["CreatedAt"]),
-            "type_name": "Compliance Finding: Create",
-            "type_uid": 200301,
+            "severity": asffToOcsf.severity,
+            "severity_id": asffToOcsf.severityId,
+            "status": recordStateMapping.status,
+            "status_id": recordStateMapping.statusId,
+            "start_time": eventTime,
+            "time": eventTime,
+            "type_name": recordStateMapping.typeName,
+            "type_uid": recordStateMapping.typeUid,
             # Profiles / Metadata
             "metadata": {
                 "uid": finding["Id"],
@@ -293,8 +332,8 @@ def ocsf_compliance_finding_mapping(self, findings: list) -> list:
                 "region": region,
                 "account": {
                     "uid": accountId,
-                    "type": asffToOcsf[3],
-                    "type_uid": asffToOcsf[2]
+                    "type": asffToOcsf.cloudAccountType,
+                    "type_uid": asffToOcsf.cloudAccountTypeId
                 }
             },
             # Observables
diff --git a/eeauditor/processor/outputs/ocsf_v1_4_0_output.py b/eeauditor/processor/outputs/ocsf_v1_4_0_output.py
new file mode 100644
index 00000000..d74dff55
--- /dev/null
+++ b/eeauditor/processor/outputs/ocsf_v1_4_0_output.py
@@ -0,0 +1,424 @@
+#This file is part of ElectricEye.
+#SPDX-License-Identifier: Apache-2.0
+
+#Licensed to the Apache Software Foundation (ASF) under one
+#or more contributor license agreements. See the NOTICE file
+#distributed with this work for additional information
+#regarding copyright ownership. The ASF licenses this file
+#to you under the Apache License, Version 2.0 (the
+#"License"); you may not use this file except in compliance
+#with the License. You may obtain a copy of the License at
+
+#http://www.apache.org/licenses/LICENSE-2.0
+
+#Unless required by applicable law or agreed to in writing,
+#software distributed under the License is distributed on an
+#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#KIND, either express or implied. See the License for the
+#specific language governing permissions and limitations
+#under the License.
+
+import logging
+import sys
+from typing import NamedTuple
+from os import path
+from processor.outputs.output_base import ElectricEyeOutput
+import json
+from base64 import b64decode
+from datetime import datetime
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("OCSF_V1.4.0_Output")
+
+# NOTE TO SELF: Update this and FAQ.md as new standards are added
+SUPPORTED_FRAMEWORKS = [
+    "NIST CSF V1.1",
+    "NIST SP 800-53 Rev. 4",
+    "AICPA TSC",
+    "ISO 27001:2013",
+    "CIS Critical Security Controls V8",
+    "NIST SP 800-53 Rev. 5",
+    "NIST SP 800-171 Rev. 2",
2", + "CSA Cloud Controls Matrix V4.0", + "CMMC 2.0", + "UK NCSC Cyber Essentials V2.2", + "HIPAA Security Rule 45 CFR Part 164 Subpart C", + "FFIEC Cybersecurity Assessment Tool", + "NERC Critical Infrastructure Protection", + "NYDFS 23 NYCRR Part 500", + "UK NCSC Cyber Assessment Framework V3.1", + "PCI-DSS V4.0", + "NZISM V3.5", + "ISO 27001:2022", + "Critical Risk Profile V1.2", + "ECB CROE", + "Equifax SCF V1.0", + "FBI CJIS Security Policy V5.9", + "CIS Amazon Web Services Foundations Benchmark V1.5", + "CIS Amazon Web Services Foundations Benchmark V2.0", + "CIS Amazon Web Services Foundations Benchmark V3.0", + "MITRE ATT&CK", + "CIS AWS Database Services Benchmark V1.0", + "CIS Microsoft Azure Foundations Benchmark V2.0.0", + "CIS Snowflake Foundations Benchmark V1.0.0" +] + +class SeverityAccountTypeComplianceMapping(NamedTuple): + severityId: int + severity: str + cloudAccountTypeId: int + cloudAccountType: str + complianceStatusId: int + complianceStatus: str + +class ActivityStatusTypeMapping(NamedTuple): + activityId: int + activityName: str + statusId: int + status: str + typeUid: int + typeName: str + +here = path.abspath(path.dirname(__file__)) +with open(f"{here}/mapped_compliance_controls.json") as jsonfile: + CONTROLS_CROSSWALK = json.load(jsonfile) + +@ElectricEyeOutput +class OcsfV140Output(object): + __provider__ = "ocsf_v1_4_0" + + def write_findings(self, findings: list, output_file: str, **kwargs): + if len(findings) == 0: + logger.error("There are not any findings to write to file!") + sys.exit(0) + + logger.info( + "Converting %s findings into OCSF v1.4.0 events", + len(findings) + ) + + decodedFindings = [ + {**d, "ProductFields": {**d["ProductFields"], + "AssetDetails": json.loads(b64decode(d["ProductFields"]["AssetDetails"]).decode("utf-8")) + if d["ProductFields"]["AssetDetails"] is not None + else None + }} if "AssetDetails" in d["ProductFields"] + else d + for d in findings + ] + + del findings + + # Map in the new compliance controls + for finding in decodedFindings: + complianceRelatedRequirements = list(finding["Compliance"]["RelatedRequirements"]) + newControls = [] + nistCsfControls = [control for control in complianceRelatedRequirements if control.startswith("NIST CSF V1.1")] + for control in nistCsfControls: + crosswalkedControls = self.nist_csf_v_1_1_controls_crosswalk(control) + # Not every single NIST CSF Control maps across to other frameworks + if crosswalkedControls: + for crosswalk in crosswalkedControls: + if crosswalk not in newControls: + newControls.append(crosswalk) + else: + continue + + complianceRelatedRequirements.extend(newControls) + + del finding["Compliance"]["RelatedRequirements"] + finding["Compliance"]["RelatedRequirements"] = complianceRelatedRequirements + + ocsfFindings = self.ocsf_compliance_finding_mapping(decodedFindings) + + del decodedFindings + + # create output file based on inputs + jsonfile = f"{output_file}_ocsf_v1-4-0_events.json" + logger.info(f"Output file named: {jsonfile}") + + with open(jsonfile, "w") as jsonfile: + json.dump( + ocsfFindings, + jsonfile, + indent=4, + default=str + ) + + return True + + def nist_csf_v_1_1_controls_crosswalk(self, nistCsfSubcategory): + """ + This function returns a list of additional control framework control IDs that mapped into a provided + NIST CSF V1.1 Subcategory (control) + """ + + # Not every single NIST CSF Control maps across to other frameworks + try: + return CONTROLS_CROSSWALK[nistCsfSubcategory] + except KeyError: + return [] + + def 
+    def compliance_finding_ocsf_normalization(self, severityLabel: str, cloudProvider: str, complianceStatusLabel: str) -> SeverityAccountTypeComplianceMapping:
+        """
+        Normalizes the following ASFF Severity, Cloud Account Provider, and Compliance values into OCSF
+        """
+
+        # map Severity.Label -> base_event.severity_id, base_event.severity
+        # use a single if/elif/else chain so only unknown labels fall through to 99
+        if severityLabel == "INFORMATIONAL":
+            severityId = 1
+            severity = severityLabel.lower().capitalize()
+        elif severityLabel == "LOW":
+            severityId = 2
+            severity = severityLabel.lower().capitalize()
+        elif severityLabel == "MEDIUM":
+            severityId = 3
+            severity = severityLabel.lower().capitalize()
+        elif severityLabel == "HIGH":
+            severityId = 4
+            severity = severityLabel.lower().capitalize()
+        elif severityLabel == "CRITICAL":
+            severityId = 5
+            severity = severityLabel.lower().capitalize()
+        else:
+            severityId = 99
+            severity = severityLabel.lower().capitalize()
+
+        # map ProductFields.Provider -> cloud.account.type_id, cloud.account.type
+        if cloudProvider == "AWS":
+            acctTypeId = 10
+            acctType = "AWS Account"
+        elif cloudProvider == "GCP":
+            acctTypeId = 11
+            acctType = "GCP Project"
+        elif cloudProvider == "OCI":
+            acctTypeId = 12
+            acctType = "OCI Compartment"
+        elif cloudProvider == "Azure":
+            acctTypeId = 13
+            acctType = "Azure Subscription"
+        elif cloudProvider == "Salesforce":
+            acctTypeId = 14
+            acctType = "Salesforce Account"
+        elif cloudProvider == "Google Workspace":
+            acctTypeId = 15
+            acctType = "Google Workspace"
+        elif cloudProvider == "ServiceNow":
+            acctTypeId = 16
+            acctType = "ServiceNow Instance"
+        elif cloudProvider == "M365":
+            acctTypeId = 17
+            acctType = "M365 Tenant"
+        else:
+            acctTypeId = 99
+            acctType = cloudProvider
+
+        # map Compliance.Status -> compliance.status_id, compliance.status
+        if complianceStatusLabel == "PASSED":
+            complianceStatusId = 1
+            complianceStatus = "Pass"
+        elif complianceStatusLabel == "WARNING":
+            complianceStatusId = 2
+            complianceStatus = "Warning"
+        elif complianceStatusLabel == "FAILED":
+            complianceStatusId = 3
+            complianceStatus = "Fail"
+        else:
+            complianceStatusId = 99
+            complianceStatus = complianceStatusLabel.lower().capitalize()
+
+        return SeverityAccountTypeComplianceMapping(
+            severityId=severityId,
+            severity=severity,
+            cloudAccountTypeId=acctTypeId,
+            cloudAccountType=acctType,
+            complianceStatusId=complianceStatusId,
+            complianceStatus=complianceStatus
+        )
+
+    def iso8061_to_epochseconds(self, iso8061: str) -> int:
+        """
+        Converts an ISO 8601 datetime string into an epoch seconds timestamp
+        """
+        return int(datetime.fromisoformat(iso8061).timestamp())
+
+    def record_state_to_status(self, recordState: str) -> ActivityStatusTypeMapping:
+        """
+        Maps ElectricEye RecordState to OCSF Status - ASFF RecordState is only ever ACTIVE or ARCHIVED
+        """
+        if recordState == "ACTIVE":
+            return ActivityStatusTypeMapping(
+                activityId=1,
+                activityName="Create",
+                statusId=1,
+                status="New",
+                typeUid=200301,
+                typeName="Compliance Finding: Create"
+            )
+
+        if recordState == "ARCHIVED":
+            return ActivityStatusTypeMapping(
+                activityId=3,
+                activityName="Close",
+                statusId=4,
+                status="Resolved",
+                typeUid=200303,
+                typeName="Compliance Finding: Close"
+            )
+
+    def ocsf_compliance_finding_mapping(self, findings: list) -> list:
+        """
+        Takes ElectricEye ASFF and outputs to OCSF v1.4.0 Compliance Finding (2003), returns a list of new findings
+        """
+
+        ocsfFindings = []
+
+        logger.info("Mapping ASFF to OCSF")
+
+        for finding in findings:
+            # Generate metadata.processed_time
+            timeNow = datetime.now().isoformat()
+            processedTime = self.iso8061_to_epochseconds(timeNow)
+
+            # Check which supported control frameworks the compliance.requirements entries
+            # start with and append the unique ones into a list for compliance.standards
+            standard = []
+            requirements = finding["Compliance"]["RelatedRequirements"]
+            for control in requirements:
+                for framework in SUPPORTED_FRAMEWORKS:
+                    if str(control).startswith(framework) and framework not in standard:
+                        standard.append(framework)
+
+            asffToOcsf = self.compliance_finding_ocsf_normalization(
+                severityLabel=finding["Severity"]["Label"],
+                cloudProvider=finding["ProductFields"]["Provider"],
+                complianceStatusLabel=finding["Compliance"]["Status"]
+            )
+
+            # Non-AWS checks have hardcoded "dummy" data for Account, Region, and Partition - set these to None
+            provider = finding["ProductFields"]["Provider"]
+            partition = finding["Resources"][0]["Partition"]
+            region = finding["ProductFields"]["AssetRegion"]
+            accountId = finding["ProductFields"]["ProviderAccountId"]
+
+            if provider != "AWS" or partition == "not-aws":
+                partition = None
+
+            if region == "us-placeholder-1":
+                region = None
+
+            if region == "aws-global":
+                region = "us-east-1"
+
+            if accountId == "000000000000":
+                accountId = None
+
+            eventTime = self.iso8061_to_epochseconds(finding["CreatedAt"])
+
+            recordState = finding["RecordState"]
+            recordStateMapping = self.record_state_to_status(recordState)
+
+            ocsf = {
+                # Base Event data
+                "activity_id": recordStateMapping.activityId,
+                "activity_name": recordStateMapping.activityName,
+                "category_name": "Findings",
+                "category_uid": 2,
+                "class_name": "Compliance Finding",
+                "class_uid": 2003,
+                "confidence_score": finding["Confidence"],
+                "severity": asffToOcsf.severity,
+                "severity_id": asffToOcsf.severityId,
+                "status": recordStateMapping.status,
+                "status_id": recordStateMapping.statusId,
+                "start_time": eventTime,
+                "time": eventTime,
+                "type_name": recordStateMapping.typeName,
+                "type_uid": recordStateMapping.typeUid,
+                # Profiles / Metadata
+                "metadata": {
+                    "uid": finding["Id"],
+                    "correlation_uid": finding["GeneratorId"],
+                    "log_provider": "ElectricEye",
+                    "logged_time": eventTime,
+                    "original_time": finding["CreatedAt"],
+                    "processed_time": processedTime,
+                    "version":"1.4.0",
+                    "profiles":["cloud"],
+                    "product": {
+                        "name":"ElectricEye",
+                        "version":"3.0",
+                        "url_string":"https://github.com/jonrau1/ElectricEye",
+                        "vendor_name":"ElectricEye"
+                    }
+                },
+                "cloud": {
+                    "provider": finding["ProductFields"]["Provider"],
+                    "region": region,
+                    "account": {
+                        "uid": accountId,
+                        "type": asffToOcsf.cloudAccountType,
+                        "type_uid": asffToOcsf.cloudAccountTypeId
+                    }
+                },
+                # Observables
+                "observables": [
+                    # Cloud Account (Project) UID
+                    {
+                        "name": "cloud.account.uid",
+                        "type": "Account UID",
+                        "type_id": 35,
+                        "value": accountId
+                    },
+                    # Resource UID
+                    {
+                        "name": "resource.uid",
+                        "type": "Resource UID",
+                        "type_id": 10,
+                        "value": finding["Resources"][0]["Id"]
+                    }
+                ],
+                # Compliance Finding Class Info
+                "compliance": {
+                    "requirements": sorted(requirements),
+                    "control": str(finding["Title"]).split("] ")[0].replace("[",""),
+                    "standards": sorted(standard),
+                    "status": asffToOcsf.complianceStatus,
+                    "status_id": asffToOcsf.complianceStatusId
+                },
+                "finding_info": {
+                    "created_time": eventTime,
+                    "desc": finding["Description"],
+                    "first_seen_time": self.iso8061_to_epochseconds(finding["FirstObservedAt"]),
+                    "modified_time": self.iso8061_to_epochseconds(finding["UpdatedAt"]),
+                    "product_uid": finding["ProductArn"],
+                    "title": finding["Title"],
+                    "types": finding["Types"],
+                    "uid": finding["Id"]
+                },
+                "remediation": {
finding["Remediation"]["Recommendation"]["Text"], + "references": [finding["Remediation"]["Recommendation"]["Url"]] + }, + "resources": [ + { + "data": finding["ProductFields"]["AssetDetails"], + "cloud_partition": partition, + "region": region, + "type": finding["ProductFields"]["AssetService"], + "uid": finding["Resources"][0]["Id"] + } + ], + "unmapped": { + "provider_type": finding["ProductFields"]["ProviderType"], + "asset_class": finding["ProductFields"]["AssetClass"], + "asset_component": finding["ProductFields"]["AssetComponent"], + "workflow_status": finding["Workflow"]["Status"], + "record_state": finding["RecordState"] + } + } + ocsfFindings.append(ocsf) + + del standard + del requirements + + return ocsfFindings \ No newline at end of file diff --git a/eeauditor/processor/outputs/output_base.py b/eeauditor/processor/outputs/output_base.py index 2061b2fe..b2737a6a 100644 --- a/eeauditor/processor/outputs/output_base.py +++ b/eeauditor/processor/outputs/output_base.py @@ -18,6 +18,11 @@ #specific language governing permissions and limitations #under the License. +import logging +from sys import exit as sysexit + +logger = logging.getLogger("OutputBase") + class ElectricEyeOutput(object): """Class to be used as a decorator to register all output providers""" @@ -32,9 +37,11 @@ def get_provider(cls, provider): """Returns the class to process the findings""" try: return cls._outputs[provider] - except KeyError as ke: - print(f"Designated output provider {provider} does not exist") - raise ke + except KeyError: + logger.warning( + "Designated output provider %s does not exist", provider + ) + sysexit(2) @classmethod def get_all_providers(cls): diff --git a/requirements-docker.txt b/requirements-docker.txt index 23d76080..a7a34419 100644 --- a/requirements-docker.txt +++ b/requirements-docker.txt @@ -10,4 +10,5 @@ pymongo>=4.6.1 pysnow>=0.7.17 python3-nmap>=1.6.0 tomli>=2.0.1 -vt-py>=0.18.0 \ No newline at end of file +vt-py>=0.18.0 +snowflake-connector-python>=3.12.1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 64cf54cc..107a8531 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,5 @@ pymongo>=4.6.1 pysnow<=0.7.17 python3-nmap>=1.6.0 tomli>=2.0.1 -vt-py>=0.18.0 \ No newline at end of file +vt-py>=0.18.0 +snowflake-connector-python>=3.12.1 \ No newline at end of file diff --git a/screenshots/ElectricEye2024Architecture.svg b/screenshots/ElectricEye2024Architecture.svg deleted file mode 100644 index bf52b916..00000000 --- a/screenshots/ElectricEye2024Architecture.svg +++ /dev/null @@ -1 +0,0 @@ -EVALUATECLOUD SECURITY POSTUREMANAGEMENT (CSPM)SAAS SECURITY POSTUREMANAGEMENT (SSPM)ENRICHREPORTATTACK SURFACEMONITORING (ASM)COMINGSOON!AWS SecurityHubJSON, CSV,HTMLAmazonDocumentDBMongoDBFiremonCloudDefenseSlackAmazonSQSAWS KinesisFirehoseSUPPORTED OUTPUTS(OCSF, File, DB, Queue, SaaS)PostgreSQLOCSF v1.1.0COMINGSOON!COMINGSOON!COMINGSOON! 
\ No newline at end of file diff --git a/screenshots/ElectricEyeAnimated.gif b/screenshots/ElectricEyeAnimated.gif deleted file mode 100644 index 50515765..00000000 Binary files a/screenshots/ElectricEyeAnimated.gif and /dev/null differ diff --git a/screenshots/architecture-for-github-thumbnail.jpg b/screenshots/architecture-for-github-thumbnail.jpg deleted file mode 100644 index 7de3b545..00000000 Binary files a/screenshots/architecture-for-github-thumbnail.jpg and /dev/null differ diff --git a/screenshots/electrice_eye_architecture.jpg b/screenshots/electrice_eye_architecture.jpg new file mode 100644 index 00000000..20223b57 Binary files /dev/null and b/screenshots/electrice_eye_architecture.jpg differ diff --git a/screenshots/extras/ElectricEye.pptx b/screenshots/extras/ElectricEye.pptx index 46808012..f418aded 100644 Binary files a/screenshots/extras/ElectricEye.pptx and b/screenshots/extras/ElectricEye.pptx differ diff --git a/screenshots/setup/snowflake/step1.JPG b/screenshots/setup/snowflake/step1.JPG new file mode 100644 index 00000000..6520711b Binary files /dev/null and b/screenshots/setup/snowflake/step1.JPG differ diff --git a/screenshots/setup/snowflake/step2.JPG b/screenshots/setup/snowflake/step2.JPG new file mode 100644 index 00000000..47090821 Binary files /dev/null and b/screenshots/setup/snowflake/step2.JPG differ diff --git a/screenshots/setup/snowflake/step3.JPG b/screenshots/setup/snowflake/step3.JPG new file mode 100644 index 00000000..f6cacb72 Binary files /dev/null and b/screenshots/setup/snowflake/step3.JPG differ diff --git a/screenshots/setup/snowflake/step5.JPG b/screenshots/setup/snowflake/step5.JPG new file mode 100644 index 00000000..1701d031 Binary files /dev/null and b/screenshots/setup/snowflake/step5.JPG differ diff --git a/screenshots/setup/snowflake/step6.JPG b/screenshots/setup/snowflake/step6.JPG new file mode 100644 index 00000000..d9c3994c Binary files /dev/null and b/screenshots/setup/snowflake/step6.JPG differ
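
---

The normalization helpers added in `ocsf_v1_4_0_output.py` are pure functions over ASFF fields, so they can be sanity-checked without running a full scan. Below is a minimal, hypothetical harness (not part of this change); it assumes it is run from the `eeauditor/` directory so the `processor.outputs` package imports cleanly and `mapped_compliance_controls.json` resolves, and that the `@ElectricEyeOutput` decorator registers the class under its `__provider__` key, which is how `get_provider()` in `output_base.py` looks it up.

# sanity_check_ocsf_v1_4_0.py - hypothetical harness, run from eeauditor/
from processor.outputs.output_base import ElectricEyeOutput
import processor.outputs.ocsf_v1_4_0_output  # noqa: F401 - import registers "ocsf_v1_4_0"

# Look up the registered provider class rather than importing the class name
# directly, since registration happens via the @ElectricEyeOutput decorator
ocsf = ElectricEyeOutput.get_provider("ocsf_v1_4_0")()

# Severity, cloud account type, and compliance status normalization
mapping = ocsf.compliance_finding_ocsf_normalization(
    severityLabel="HIGH",
    cloudProvider="GCP",
    complianceStatusLabel="FAILED"
)
assert (mapping.severityId, mapping.severity) == (4, "High")
assert (mapping.cloudAccountTypeId, mapping.cloudAccountType) == (11, "GCP Project")
assert (mapping.complianceStatusId, mapping.complianceStatus) == (3, "Fail")

# RecordState -> OCSF activity / status / type mapping
closed = ocsf.record_state_to_status("ARCHIVED")
assert (closed.activityName, closed.status, closed.typeUid) == ("Close", "Resolved", 200303)

Because the expected tuples are hardcoded, the asserts surface any drift in the OCSF type IDs or label casing before events are written to `output_ocsf_v1-4-0_events.json`.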