From 8d3ae0436be52599296efb67f082206cab413872 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 13:49:42 -0500 Subject: [PATCH 01/21] smartctl_exporter publishes both drive_trip and current drive temperatures. Since most of the alerts are going to be permanent, it does not make sense to wait for the alert to be on for a certain time. Temperature sensors likewise vary, using the last sample is not sufficient to alert on potential issues. --- CONTRIBUTING.md | 4 ++-- _data/rules.yml | 41 +++++++++++++++++++++++------------------ 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1fcb24bfc..02b8c3865 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,8 +32,8 @@ Or with Docker: docker run --rm -it -p 4000:4000 -v $(pwd):/srv/jekyll jekyll/jekyll jekyll serve ``` -Or with Docker-Compose: +Or with Docker Compose: ``` -docker-compose up -d +docker compose up -d ``` diff --git a/_data/rules.yml b/_data/rules.yml index 42ff8f0ae..9abb28514 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -338,31 +338,36 @@ groups: slug: smartctl-exporter doc_url: https://github.com/prometheus-community/smartctl_exporter rules: - - name: Smart device temperature warning - description: Device temperature warning (instance {{ $labels.instance }}) - query: smartctl_device_temperature > 60 + - name: SMART device temperature warning + description: Device temperature warning (instance {{ $labels.instance }}, drive {{ $labels.device }}) + query: avg_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) > 60 severity: warning - for: 2m - - name: Smart device temperature critical - description: Device temperature critical (instance {{ $labels.instance }}) - query: smartctl_device_temperature > 80 + - name: SMART device temperature critical + description: Device temperature critical (instance {{ $labels.instance }}, drive {{ $labels.device }}) + query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70 severity: critical - for: 2m - - name: Smart critical warning - description: device has critical warning (instance {{ $labels.instance }}) + # Datacenter drives have a trip temperature + - name: SMART device temperature was over trip value + description: Device temperature over trip value (instance {{ $labels.instance }}, drive {{ $labels.device }}) + query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"} + severity: critical + - name: SMART status + description: Device has a SMART status failure (instance {{ $labels.instance }}, drive {{ $labels.device }}) + query: smartctl_device_smart_status != 1 + severity: critical + - name: SMART critical warning + description: Disk controller has critical warning (instance {{ $labels.instance }}, drive {{ $labels.device }}) query: smartctl_device_critical_warning > 0 severity: critical - for: 15m - - name: Smart media errors - description: device has media errors (instance {{ $labels.instance }}) + - name: SMART media errors + description: Disk controller detected media errors (instance {{ $labels.instance }}, drive {{ $labels.device }}) query: smartctl_device_media_errors > 0 severity: critical - for: 15m - - name: Smart NVME Wearout Indicator - description: NVMe device is wearing out (instance {{ $labels.instance }}) - query: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"} + - name: 
SMART Wearout Indicator + description: Device is wearing out (instance {{ $labels.instance }}, drive {{ $labels.device }}) + # The threshold is not present on devices that do not support it + query: smartctl_device_available_spare < smartctl_device_available_spare_threshold severity: critical - for: 15m - name: Docker containers exporters: From 6fe429e7b7db33dccfa69526f4025f3d7272450a Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 14:01:16 -0500 Subject: [PATCH 02/21] Add an option to run GitHub Action manually --- .github/workflows/dist.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 1f64e366b..03c2389b9 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -1,6 +1,7 @@ name: Publish on: + workflow_dispatch: push: branches: - master From fbca1a1cb81e113b9839277bf5e816629ad2abad Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 14:08:30 -0500 Subject: [PATCH 03/21] Add an option to force running the action for testing purposes --- .github/workflows/dist.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 03c2389b9..61d23fb8f 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -9,8 +9,8 @@ on: jobs: publish: name: Publish - # Check if the PR is not from a fork - if: github.repository_owner == 'samber' + # Check if the PR is not from a fork or manually executed + if: github.repository_owner == 'samber' or github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest steps: - name: Checkout Repo From e3bc917e6b817e8c3a0eed2971078ed361c81787 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 14:10:16 -0500 Subject: [PATCH 04/21] Set variables correctly --- .github/workflows/dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 61d23fb8f..9f5ec1b72 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -10,7 +10,7 @@ jobs: publish: name: Publish # Check if the PR is not from a fork or manually executed - if: github.repository_owner == 'samber' or github.event_name == 'workflow_dispatch' + if: ${{ (github.repository_owner == 'samber') or (github.event_name == 'workflow_dispatch') }} runs-on: ubuntu-latest steps: - name: Checkout Repo From 79960ae2b44b8b11418cf1777b1a6e67b4049d54 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 14:12:32 -0500 Subject: [PATCH 05/21] Set variables correctly --- .github/workflows/dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 9f5ec1b72..e0ba5c8a5 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -10,7 +10,7 @@ jobs: publish: name: Publish # Check if the PR is not from a fork or manually executed - if: ${{ (github.repository_owner == 'samber') or (github.event_name == 'workflow_dispatch') }} + if: ${{ (github.repository_owner == 'samber') || (github.event_name == 'workflow_dispatch') }} runs-on: ubuntu-latest steps: - name: Checkout Repo From 59dc6dca5c9bb366033090518ed2831b8acd422c Mon Sep 17 00:00:00 2001 From: samber Date: Sat, 24 Feb 2024 19:15:25 +0000 Subject: [PATCH 06/21] Publish --- .../smartctl-exporter.yml | 56 ++++++++++++------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml 
b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml index 1946c38e1..4334bf02c 100644 --- a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml +++ b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml @@ -5,46 +5,64 @@ groups: rules: - alert: SmartDeviceTemperatureWarning - expr: 'smartctl_device_temperature > 60' - for: 2m + expr: 'avg_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) > 60' + for: 0m labels: severity: warning annotations: - summary: Smart device temperature warning (instance {{ $labels.instance }}) - description: "Device temperature warning (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART device temperature warning (instance {{ $labels.instance }}) + description: "Device temperature warning (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartDeviceTemperatureCritical - expr: 'smartctl_device_temperature > 80' - for: 2m + expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70' + for: 0m labels: severity: critical annotations: - summary: Smart device temperature critical (instance {{ $labels.instance }}) - description: "Device temperature critical (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART device temperature critical (instance {{ $labels.instance }}) + description: "Device temperature critical (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: SmartDeviceTemperatureWasOverTripValue + expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"}' + for: 0m + labels: + severity: critical + annotations: + summary: SMART device temperature was over trip value (instance {{ $labels.instance }}) + description: "Device temperature over trip value (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: SmartStatus + expr: 'smartctl_device_smart_status != 1' + for: 0m + labels: + severity: critical + annotations: + summary: SMART status (instance {{ $labels.instance }}) + description: "Device has a SMART status failure (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartCriticalWarning expr: 'smartctl_device_critical_warning > 0' - for: 15m + for: 0m labels: severity: critical annotations: - summary: Smart critical warning (instance {{ $labels.instance }}) - description: "device has critical warning (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART critical warning (instance {{ $labels.instance }}) + description: "Disk controller has critical warning (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartMediaErrors expr: 'smartctl_device_media_errors > 0' - for: 15m + for: 0m labels: severity: critical annotations: - summary: Smart media errors (instance {{ $labels.instance }}) - description: "device has media errors (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART media errors (instance {{ $labels.instance }}) + description: "Disk controller detected media errors (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ 
$value }}\n LABELS = {{ $labels }}" - - alert: SmartNvmeWearoutIndicator - expr: 'smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"}' - for: 15m + - alert: SmartWearoutIndicator + expr: 'smartctl_device_available_spare < smartctl_device_available_spare_threshold' + for: 0m labels: severity: critical annotations: - summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }}) - description: "NVMe device is wearing out (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART Wearout Indicator (instance {{ $labels.instance }}) + description: "Device is wearing out (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" From d6ef8e7449ea27e305a846a45ceda9df55cef41f Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 19:06:50 -0500 Subject: [PATCH 07/21] Clean up some more metrics --- _data/rules.yml | 112 ++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/_data/rules.yml b/_data/rules.yml index 9abb28514..8cb096359 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -137,53 +137,46 @@ groups: rules: - name: Host out of memory description: Node memory is filling up (< 10% left) - query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning for: 2m - name: Host memory under memory pressure - description: The node is under heavy memory pressure. High rate of major page faults - query: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: The node is under heavy memory pressure. High rate of loading memory pages from disk. + query: '(rate(node_vmstat_pgmajfault[5m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - for: 2m - name: Host Memory is underutilized - description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})" - query: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})" + # We use MemFree, many buffers (ZFS, databases etc) are declared as available memory, but would perform poorly if reduced + query: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: info for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly - name: Host unusual network throughput in - description: Host network interfaces are probably receiving too much data (> 100 MB/s) - query: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Host receive bandwidth is high (>80%)" + query: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - for: 5m - name: Host unusual network throughput out - description: Host network interfaces are probably sending too much data (> 100 MB/s) - query: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Host transmit bandwidth is high (>80%)" + query: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - for: 5m - name: Host unusual disk read rate - description: Disk is probably reading too much data (> 50 MB/s) - query: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning - for: 5m - - name: Host unusual disk write rate - description: Disk is probably writing too much data (> 50 MB/s) - query: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Disk is too busy (IO wait > 80%)" + query: '(rate(node_disk_io_time_seconds_total[5m]) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - for: 2m - name: Host out of disk space - description: Disk is almost full (< 10% left) - query: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + description: "Disk is almost full (< 10% left)" + # Network filesystems have quotas etc. and should not be included in this alert + query: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + severity: critical comments: | Please add ignored mountpoints in node_exporter parameters like "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)". Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users. 
for: 2m - - name: Host disk will fill in 24 hours - description: Filesystem is predicted to run out of space within the next 24 hours at current write rate - query: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - name: Host disk may fill in 24 hours + description: Filesystem will likely run out of space within the next 24 hours + query: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0' severity: warning comments: | Please add ignored mountpoints in node_exporter parameters like @@ -192,16 +185,21 @@ groups: for: 2m - name: Host out of inodes description: Disk is almost running out of available inodes (< 10% left) - query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + severity: critical for: 2m - name: Host filesystem device error - description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem" - query: "node_filesystem_device_error == 1" + description: "Error stat-ing the {{ $labels.mountpoint }} filesystem" + query: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1' severity: critical + comments: | + This indicates there was a problem getting information for the filesystem via statfs. + This is usually due to permissions issues or virtual filesystems. + Please add ignored mountpoints in node_exporter parameters like + "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)". - name: Host inodes will fill in 24 hours description: Filesystem is predicted to run out of inodes within the next 24 hours at current write rate - query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0' severity: warning for: 2m - name: Host unusual disk read latency @@ -216,12 +214,12 @@ groups: for: 2m - name: Host high CPU load description: CPU load is > 80% - query: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning for: 10m - name: Host CPU is underutilized - description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs." 
- query: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs." + query: '((avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: info for: 1w comments: | @@ -231,14 +229,13 @@ groups: query: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - name: Host CPU high iowait - description: CPU iowait > 10%. A high iowait means that you are disk or network bound. - query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: CPU iowait > 10%. Your CPU is idling waiting for storage to respond. + query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - name: Host unusual disk IO - description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues." - query: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities." + query: '(rate(node_disk_io_time_seconds_total[5m]) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning - for: 5m - name: Host context switching description: Context switching is growing on the node (> 10000 / CPU / s) query: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' @@ -265,19 +262,19 @@ groups: description: "Physical node temperature alarm triggered" query: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: critical - - name: Host RAID array got inactive - description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically." - query: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - name: Host Software RAID is not active + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining." + query: '(node_md_disks_required - on(device, instance) node_md_disks{state="active"}) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: critical - - name: Host RAID disk failure - description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap" + - name: Host Software RAID disk failure + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention." 
query: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning for: 2m - name: Host kernel version deviations - description: Different kernel versions are running - query: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + description: Kernel version for {{ $labels.instance }} has changed + query: 'changes(node_uname_info[1h]) == 0' + severity: info for: 6h - name: Host OOM kill detected description: OOM kill detected @@ -301,11 +298,6 @@ groups: query: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: warning for: 2m - - name: Host Network Interface Saturated - description: 'The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.' - query: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' # < to 10Gb to prevent +inf when max speed is unknown - severity: warning - for: 1m - name: Host Network Bond Degraded description: 'Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".' query: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' @@ -339,32 +331,32 @@ groups: doc_url: https://github.com/prometheus-community/smartctl_exporter rules: - name: SMART device temperature warning - description: Device temperature warning (instance {{ $labels.instance }}, drive {{ $labels.device }}) - query: avg_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) > 60 + description: Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }}) + query: avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) > 60 severity: warning - name: SMART device temperature critical - description: Device temperature critical (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }}) query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70 severity: critical # Datacenter drives have a trip temperature - name: SMART device temperature was over trip value - description: Device temperature over trip value (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }}) query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"} severity: critical - name: SMART status - description: Device has a SMART status failure (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }}) query: smartctl_device_smart_status != 1 severity: critical - name: SMART critical warning - description: Disk controller has critical warning (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: 
Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }}) query: smartctl_device_critical_warning > 0 severity: critical - name: SMART media errors - description: Disk controller detected media errors (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }}) query: smartctl_device_media_errors > 0 severity: critical - name: SMART Wearout Indicator - description: Device is wearing out (instance {{ $labels.instance }}, drive {{ $labels.device }}) + description: Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }}) # The threshold is not present on devices that do not support it query: smartctl_device_available_spare < smartctl_device_available_spare_threshold severity: critical From b660fafe4c5b7005c07729225f89ee983c379eee Mon Sep 17 00:00:00 2001 From: samber Date: Sun, 25 Feb 2024 00:10:19 +0000 Subject: [PATCH 08/21] Publish --- .../rules/host-and-hardware/node-exporter.yml | 106 ++++++++---------- .../smartctl-exporter.yml | 16 +-- 2 files changed, 52 insertions(+), 70 deletions(-) diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index de48231e6..e5fa55cca 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -5,7 +5,7 @@ groups: rules: - alert: HostOutOfMemory - expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 2m labels: severity: warning @@ -14,97 +14,88 @@ groups: description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryUnderMemoryPressure - expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m + expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + for: 0m labels: severity: warning annotations: summary: Host memory under memory pressure (instance {{ $labels.instance }}) - description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryIsUnderutilized - expr: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 1w labels: severity: info annotations: summary: Host Memory is underutilized (instance {{ $labels.instance }}) - description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputIn - expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput in (instance {{ $labels.instance }}) - description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host receive bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputOut - expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput out (instance {{ $labels.instance }}) - description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadRate - expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + for: 0m labels: severity: warning annotations: summary: Host unusual disk read rate (instance {{ $labels.instance }}) - description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - - alert: HostUnusualDiskWriteRate - expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m - labels: - severity: warning - annotations: - summary: Host unusual disk write rate (instance {{ $labels.instance }}) - description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfDiskSpace - expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of disk space (instance {{ $labels.instance }}) description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostDiskWillFillIn24Hours - expr: '((node_filesystem_avail_bytes * 100) / 
node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostDiskMayFillIn24Hours + expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0' for: 2m labels: severity: warning annotations: - summary: Host disk will fill in 24 hours (instance {{ $labels.instance }}) - description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host disk may fill in 24 hours (instance {{ $labels.instance }}) + description: "Filesystem will likely run out of space within the next 24 hours\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfInodes - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of inodes (instance {{ $labels.instance }}) description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostFilesystemDeviceError - expr: 'node_filesystem_device_error == 1' + expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1' for: 0m labels: severity: critical annotations: summary: Host filesystem device error (instance {{ $labels.instance }}) - description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostInodesWillFillIn24Hours - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0' for: 2m labels: severity: warning @@ -131,7 +122,7 @@ groups: description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostHighCpuLoad - expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 10m labels: severity: warning @@ -140,13 +131,13 @@ groups: description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuIsUnderutilized - expr: '(100 - 
(rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 1w labels: severity: info annotations: summary: Host CPU is underutilized (instance {{ $labels.instance }}) - description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuStealNoisyNeighbor expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' @@ -158,22 +149,22 @@ groups: description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuHighIowait - expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 0m labels: severity: warning annotations: summary: Host CPU high iowait (instance {{ $labels.instance }}) - description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskIo - expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '(rate(node_disk_io_time_seconds_total[5m]) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + for: 0m labels: severity: warning annotations: summary: Host unusual disk IO (instance {{ $labels.instance }}) - description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostContextSwitching expr: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' @@ -220,32 +211,32 @@ groups: summary: Host node overtemperature alarm (instance {{ $labels.instance }}) description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidArrayGotInactive - expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidIsNotActive + expr: '(node_md_disks_required - on(device, instance) node_md_disks{state="active"}) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 0m labels: severity: critical annotations: - summary: Host RAID array got inactive (instance {{ $labels.instance }}) - description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. 
The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID is not active (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidDiskFailure + - alert: HostSoftwareRaidDiskFailure expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 2m labels: severity: warning annotations: - summary: Host RAID disk failure (instance {{ $labels.instance }}) - description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID disk failure (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostKernelVersionDeviations - expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'changes(node_uname_info[1h]) == 0' for: 6h labels: - severity: warning + severity: info annotations: summary: Host kernel version deviations (instance {{ $labels.instance }}) - description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Kernel version for {{ $labels.instance }} has changed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOomKillDetected expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' @@ -292,15 +283,6 @@ groups: summary: Host Network Transmit Errors (instance {{ $labels.instance }}) description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkInterfaceSaturated - expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 1m - labels: - severity: warning - annotations: - summary: Host Network Interface Saturated (instance {{ $labels.instance }}) - description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkBondDegraded expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 2m diff --git a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml index 4334bf02c..9b82a6f42 100644 --- a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml +++ b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml @@ -5,13 +5,13 @@ groups: rules: - alert: SmartDeviceTemperatureWarning - expr: 'avg_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) > 60' + expr: 
'avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) > 60' for: 0m labels: severity: warning annotations: summary: SMART device temperature warning (instance {{ $labels.instance }}) - description: "Device temperature warning (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartDeviceTemperatureCritical expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70' @@ -20,7 +20,7 @@ groups: severity: critical annotations: summary: SMART device temperature critical (instance {{ $labels.instance }}) - description: "Device temperature critical (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartDeviceTemperatureWasOverTripValue expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"}' @@ -29,7 +29,7 @@ groups: severity: critical annotations: summary: SMART device temperature was over trip value (instance {{ $labels.instance }}) - description: "Device temperature over trip value (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartStatus expr: 'smartctl_device_smart_status != 1' @@ -38,7 +38,7 @@ groups: severity: critical annotations: summary: SMART status (instance {{ $labels.instance }}) - description: "Device has a SMART status failure (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartCriticalWarning expr: 'smartctl_device_critical_warning > 0' @@ -47,7 +47,7 @@ groups: severity: critical annotations: summary: SMART critical warning (instance {{ $labels.instance }}) - description: "Disk controller has critical warning (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartMediaErrors expr: 'smartctl_device_media_errors > 0' @@ -56,7 +56,7 @@ groups: severity: critical annotations: summary: SMART media errors (instance {{ $labels.instance }}) - description: "Disk controller detected media errors (instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartWearoutIndicator expr: 'smartctl_device_available_spare < smartctl_device_available_spare_threshold' @@ -65,4 +65,4 @@ groups: severity: critical annotations: summary: SMART Wearout Indicator (instance {{ $labels.instance }}) - description: "Device is wearing out 
(instance {{ $labels.instance }}, drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" From 87ee1292e7bb395a4df601227d3176e177832849 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sat, 24 Feb 2024 19:30:22 -0500 Subject: [PATCH 09/21] Minor bug fixes --- _data/rules.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/_data/rules.yml b/_data/rules.yml index 8cb096359..afd85dd95 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -262,9 +262,9 @@ groups: description: "Physical node temperature alarm triggered" query: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: critical - - name: Host Software RAID is not active + - name: Host Software RAID insufficient drives description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining." - query: '(node_md_disks_required - on(device, instance) node_md_disks{state="active"}) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: critical - name: Host Software RAID disk failure description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention." @@ -273,7 +273,7 @@ groups: for: 2m - name: Host kernel version deviations description: Kernel version for {{ $labels.instance }} has changed - query: 'changes(node_uname_info[1h]) == 0' + query: 'changes(node_uname_info[1h]) > 0' severity: info for: 6h - name: Host OOM kill detected From 45a711f92189751644398240a08ecf41c79b7ba2 Mon Sep 17 00:00:00 2001 From: samber Date: Sun, 25 Feb 2024 00:32:13 +0000 Subject: [PATCH 10/21] Publish --- dist/rules/host-and-hardware/node-exporter.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index e5fa55cca..32a3d7125 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -211,13 +211,13 @@ groups: summary: Host node overtemperature alarm (instance {{ $labels.instance }}) description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostSoftwareRaidIsNotActive - expr: '(node_md_disks_required - on(device, instance) node_md_disks{state="active"}) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidInsufficientDrives + expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' for: 0m labels: severity: critical annotations: - summary: Host Software RAID is not active (instance {{ $labels.instance }}) + summary: Host Software RAID insufficient drives (instance {{ $labels.instance }}) description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSoftwareRaidDiskFailure @@ -230,7 +230,7 @@ groups: description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostKernelVersionDeviations - 
expr: 'changes(node_uname_info[1h]) == 0' + expr: 'changes(node_uname_info[1h]) > 0' for: 6h labels: severity: info From 46043360de5b8526c489c79bccdec6d204833ef0 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Sun, 25 Feb 2024 14:53:30 -0500 Subject: [PATCH 11/21] Removed queries that throw errors when systems are upgraded. Also fixed and simplified a few Postgres queries. --- _data/rules.yml | 102 +++++++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/_data/rules.yml b/_data/rules.yml index afd85dd95..269dde5f5 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -137,37 +137,37 @@ groups: rules: - name: Host out of memory description: Node memory is filling up (< 10% left) - query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)' severity: warning for: 2m - name: Host memory under memory pressure description: The node is under heavy memory pressure. High rate of loading memory pages from disk. - query: '(rate(node_vmstat_pgmajfault[5m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_vmstat_pgmajfault[5m]) > 1000)' severity: warning - name: Host Memory is underutilized description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})" # We use MemFree, many buffers (ZFS, databases etc) are declared as available memory, but would perform poorly if reduced - query: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80)' severity: info for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly - name: Host unusual network throughput in description: "Host receive bandwidth is high (>80%)" - query: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' severity: warning - name: Host unusual network throughput out description: "Host transmit bandwidth is high (>80%)" - query: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' severity: warning - name: Host unusual disk read rate description: "Disk is too busy (IO wait > 80%)" - query: '(rate(node_disk_io_time_seconds_total[5m]) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_disk_io_time_seconds_total[5m]) > .80)' severity: warning - name: Host out of disk space description: "Disk is almost full (< 10% left)" # Network filesystems have quotas etc. 
and should not be included in this alert - query: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)' severity: critical comments: | Please add ignored mountpoints in node_exporter parameters like @@ -185,7 +185,7 @@ groups: for: 2m - name: Host out of inodes description: Disk is almost running out of available inodes (< 10% left) - query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)' severity: critical for: 2m - name: Host filesystem device error @@ -204,41 +204,41 @@ groups: for: 2m - name: Host unusual disk read latency description: Disk latency is growing (read operations > 100ms) - query: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)' severity: warning for: 2m - name: Host unusual disk write latency description: Disk latency is growing (write operations > 100ms) - query: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)' severity: warning for: 2m - name: Host high CPU load description: CPU load is > 80% - query: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80)' severity: warning for: 10m - name: Host CPU is underutilized description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs." - query: '((avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80' severity: info for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly - name: Host CPU steal noisy neighbor description: CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit. 
- query: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' severity: warning - name: Host CPU high iowait description: CPU iowait > 10%. Your CPU is idling waiting for storage to respond. - query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10' severity: warning - name: Host unusual disk IO description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities." - query: '(rate(node_disk_io_time_seconds_total[5m]) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8' severity: warning - name: Host context switching description: Context switching is growing on the node (> 10000 / CPU / s) - query: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000' severity: warning comments: | 10000 context switches is an arbitrary number. @@ -246,81 +246,82 @@ groups: Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58 - name: Host swap is filling up description: Swap is filling up (>80%) - query: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)' severity: warning for: 2m - name: Host systemd service crashed description: "systemd service crashed" - query: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_systemd_unit_state{state="failed"} == 1)' severity: warning - name: Host physical component too hot description: "Physical hardware component too hot" - query: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75' severity: warning for: 5m - name: Host node overtemperature alarm description: "Physical node temperature alarm triggered" - query: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_hwmon_temp_crit_alarm_celsius == 1)' severity: critical - name: Host Software RAID insufficient drives description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining." - query: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)' severity: critical - name: Host Software RAID disk failure description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention." 
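      # A related sketch, not part of this rule set: to page only when a failed member has actually
      # left the array short-handed, rather than when a hot spare has already rebuilt it, the failed-disk
      # condition below can be combined with the insufficient-drives condition above:
      #   node_md_disks{state="failed"} > 0
      #     and on(device, instance) (node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0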
- query: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_md_disks{state="failed"} > 0)' severity: warning for: 2m - name: Host kernel version deviations description: Kernel version for {{ $labels.instance }} has changed query: 'changes(node_uname_info[1h]) > 0' + comments: | + This alert may happen when the host is rebooted after a software update. severity: info - for: 6h - name: Host OOM kill detected description: OOM kill detected - query: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + query: '(increase(node_vmstat_oom_kill[1m]) > 0)' + severity: critical - name: Host EDAC Correctable Errors detected description: 'Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last 5 minutes.' - query: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(increase(node_edac_correctable_errors_total[1m]) > 0)' severity: info - name: Host EDAC Uncorrectable Errors detected description: 'Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.' - query: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_edac_uncorrectable_errors_total > 0)' severity: warning - name: Host Network Receive Errors description: 'Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' - query: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)' severity: warning for: 2m - name: Host Network Transmit Errors description: 'Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' - query: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)' severity: warning for: 2m - name: Host Network Bond Degraded description: 'Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".' - query: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_bonding_active - node_bonding_slaves) != 0)' severity: warning for: 2m - name: Host conntrack limit description: "The number of conntrack is approaching limit" - query: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)' severity: warning for: 5m - name: Host clock skew description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host." 
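      # A roughly equivalent sketch of the clock-skew expression below, for readers who find the
      # two-sided form hard to parse (same 50 ms threshold; sgn() assumes Prometheus 2.29 or newer):
      #   abs(node_timex_offset_seconds) > 0.05
      #     and deriv(node_timex_offset_seconds[5m]) * sgn(node_timex_offset_seconds) >= 0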
- query: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))' severity: warning for: 10m - name: Host clock not synchronising description: "Clock not synchronising. Ensure NTP is configured on this host." - query: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)' severity: warning for: 2m - name: Host requires reboot description: "{{ $labels.instance }} requires a reboot." - query: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_reboot_required > 0)' severity: info for: 4h @@ -331,18 +332,22 @@ groups: doc_url: https://github.com/prometheus-community/smartctl_exporter rules: - name: SMART device temperature warning - description: Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }}) - query: avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) > 60 + description: Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }} over 60°C + query: (avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 60 severity: warning - name: SMART device temperature critical - description: Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }}) - query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70 + description: Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }} over 70°C + query: (max_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 70 severity: critical # Datacenter drives have a trip temperature - - name: SMART device temperature was over trip value + - name: SMART device temperature over trip value description: Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }}) query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"} severity: critical + - name: SMART device temperature nearing trip value + description: Device temperature at 80% of trip value on {{ $labels.instance }} drive {{ $labels.device }}) + query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) (smartctl_device_temperature{temperature_type="drive_trip"} * .80) + severity: warning - name: SMART status description: Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }}) query: smartctl_device_smart_status != 1 @@ -355,6 +360,8 @@ groups: description: Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }}) query: smartctl_device_media_errors > 0 severity: critical + comments: | + Media errors are a sign of a failing disk. Replace the disk as soon as possible. 
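      # The fixed 60/70 °C rules above use `unless on (instance, device)` so that drives which also
      # publish a drive_trip temperature are covered only by the trip-value rules. A sketch of the same
      # exclusion expressed as a recording rule (hypothetical rule name, not part of this rule set):
      #   - record: smartctl:device_temperature_current:no_trip
      #     expr: smartctl_device_temperature{temperature_type="current"} unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}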
- name: SMART Wearout Indicator description: Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }}) # The threshold is not present on devices that do not support it @@ -646,13 +653,15 @@ groups: - name: Postgresql too many connections description: PostgreSQL instance has too many connections (> 80%). query: "sum by (instance, job, server) (pg_stat_activity_count) > min by (instance, job, server) (pg_settings_max_connections * 0.8)" - severity: warning + severity: critical for: 2m - name: Postgresql not enough connections description: PostgreSQL instance should have more connections (> 5) query: 'sum by (datname) (pg_stat_activity_count{datname!~"template.*|postgres"}) < 5' - severity: warning + severity: critical for: 2m + comments: | + If the number of connections is too low, it may indicate that the application has died. - name: Postgresql dead locks description: PostgreSQL has dead-locks query: 'increase(pg_stat_database_deadlocks{datname!~"template.*|postgres"}[1m]) > 5' @@ -663,9 +672,9 @@ groups: severity: warning - name: Postgresql commit rate low description: Postgresql seems to be processing very few transactions - query: "rate(pg_stat_database_xact_commit[1m]) < 10" + query: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[1m]) < 5' severity: critical - for: 2m + for: 5m - name: Postgresql low XID consumption description: Postgresql seems to be consuming transaction IDs very slowly query: "rate(pg_txid_current[1m]) < 5" @@ -691,12 +700,15 @@ groups: for: 2m - name: Postgresql configuration changed description: Postgres Database configuration change has occurred - query: '{__name__=~"pg_settings_.*"} != ON(__name__) {__name__=~"pg_settings_([^t]|t[^r]|tr[^a]|tra[^n]|tran[^s]|trans[^a]|transa[^c]|transac[^t]|transact[^i]|transacti[^o]|transactio[^n]|transaction[^_]|transaction_[^r]|transaction_r[^e]|transaction_re[^a]|transaction_rea[^d]|transaction_read[^_]|transaction_read_[^o]|transaction_read_o[^n]|transaction_read_on[^l]|transaction_read_onl[^y]).*"} OFFSET 5m' + query: 'changes(label_replace({__name__=~"pg_settings_.*"},"name","$1","__name__", "(.+)")[1h:]) > 0' severity: info - name: Postgresql SSL compression active - description: Database connections with SSL compression enabled. This may add significant jitter in replication delay. Replicas should turn off SSL compression via `sslcompression=0` in `recovery.conf`. + description: Database allows connections with SSL compression enabled. query: "sum(pg_stat_ssl_compression) > 0" severity: critical + comments: | + TLS compression is a security risk and should be disabled. It has been removed for TLSv1.3. + https://www.bytebase.com/docs/slow-query/enable-pg-stat-statements-for-postgresql/ - name: Postgresql too many locks acquired description: Too many locks acquired on the database. If this alert happens frequently, we may need to increase the postgres setting max_locks_per_transaction. 
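      # The denominator in the query below approximates the lock table capacity. PostgreSQL actually
      # sizes it as max_locks_per_transaction * (max_connections + max_prepared_transactions); a sketch
      # that also counts prepared transactions, assuming pg_settings_max_prepared_transactions is exported:
      #   (sum(pg_locks_count)) / (pg_settings_max_locks_per_transaction * (pg_settings_max_connections + pg_settings_max_prepared_transactions)) > 0.20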
query: "((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20" From c026db7e52a1ae68978f9cefa3113fa7c5c499b4 Mon Sep 17 00:00:00 2001 From: samber Date: Sun, 25 Feb 2024 19:55:46 +0000 Subject: [PATCH 12/21] Publish --- .../rules/host-and-hardware/node-exporter.yml | 68 +++++++++---------- dist/rules/postgresql/postgres-exporter.yml | 12 ++-- .../smartctl-exporter.yml | 21 ++++-- 3 files changed, 55 insertions(+), 46 deletions(-) diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index 32a3d7125..79506c55b 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -5,7 +5,7 @@ groups: rules: - alert: HostOutOfMemory - expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)' for: 2m labels: severity: warning @@ -14,7 +14,7 @@ groups: description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryUnderMemoryPressure - expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)' for: 0m labels: severity: warning @@ -23,7 +23,7 @@ groups: description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryIsUnderutilized - expr: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80)' for: 1w labels: severity: info @@ -32,7 +32,7 @@ groups: description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputIn - expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' for: 0m labels: severity: warning @@ -41,7 +41,7 @@ groups: description: "Host receive bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputOut - expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' for: 0m labels: severity: warning @@ -50,7 +50,7 @@ groups: description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadRate - expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)' for: 0m labels: severity: warning @@ -59,7 +59,7 @@ groups: description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfDiskSpace - expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: severity: critical @@ -77,7 +77,7 @@ groups: description: "Filesystem will likely run out of space within the next 24 hours\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfInodes - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: severity: critical @@ -104,7 +104,7 @@ groups: description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadLatency - expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -113,7 +113,7 @@ groups: description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskWriteLatency - expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * 
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -122,7 +122,7 @@ groups: description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostHighCpuLoad - expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80)' for: 10m labels: severity: warning @@ -131,7 +131,7 @@ groups: description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuIsUnderutilized - expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80' for: 1w labels: severity: info @@ -140,7 +140,7 @@ groups: description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuStealNoisyNeighbor - expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' for: 0m labels: severity: warning @@ -149,7 +149,7 @@ groups: description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuHighIowait - expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10' for: 0m labels: severity: warning @@ -158,7 +158,7 @@ groups: description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskIo - expr: '(rate(node_disk_io_time_seconds_total[5m]) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8' for: 0m labels: severity: warning @@ -167,7 +167,7 @@ groups: description: "Disk usage >80%. 
Check storage for issues or increase IOPS capabilities.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostContextSwitching - expr: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000' for: 0m labels: severity: warning @@ -176,7 +176,7 @@ groups: description: "Context switching is growing on the node (> 10000 / CPU / s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSwapIsFillingUp - expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)' for: 2m labels: severity: warning @@ -185,7 +185,7 @@ groups: description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSystemdServiceCrashed - expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_systemd_unit_state{state="failed"} == 1)' for: 0m labels: severity: warning @@ -194,7 +194,7 @@ groups: description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostPhysicalComponentTooHot - expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75' for: 5m labels: severity: warning @@ -203,7 +203,7 @@ groups: description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNodeOvertemperatureAlarm - expr: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_hwmon_temp_crit_alarm_celsius == 1)' for: 0m labels: severity: critical @@ -212,7 +212,7 @@ groups: description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSoftwareRaidInsufficientDrives - expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)' for: 0m labels: severity: critical @@ -221,7 +221,7 @@ groups: description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSoftwareRaidDiskFailure - expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_md_disks{state="failed"} > 0)' for: 2m labels: severity: warning @@ -231,7 +231,7 @@ groups: - alert: HostKernelVersionDeviations expr: 'changes(node_uname_info[1h]) > 0' - for: 6h + for: 0m labels: severity: info annotations: @@ -239,16 +239,16 @@ groups: description: "Kernel version for {{ $labels.instance }} has changed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOomKillDetected - expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) 
node_uname_info{nodename=~".+"}' + expr: '(increase(node_vmstat_oom_kill[1m]) > 0)' for: 0m labels: - severity: warning + severity: critical annotations: summary: Host OOM kill detected (instance {{ $labels.instance }}) description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacCorrectableErrorsDetected - expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)' for: 0m labels: severity: info @@ -257,7 +257,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacUncorrectableErrorsDetected - expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_edac_uncorrectable_errors_total > 0)' for: 0m labels: severity: warning @@ -266,7 +266,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkReceiveErrors - expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -275,7 +275,7 @@ groups: description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkTransmitErrors - expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -284,7 +284,7 @@ groups: description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkBondDegraded - expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_bonding_active - node_bonding_slaves) != 0)' for: 2m labels: severity: warning @@ -293,7 +293,7 @@ groups: description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostConntrackLimit - expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)' for: 5m labels: severity: warning @@ -302,7 +302,7 @@ groups: description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockSkew - expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) 
node_uname_info{nodename=~".+"}' + expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))' for: 10m labels: severity: warning @@ -311,7 +311,7 @@ groups: description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockNotSynchronising - expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)' for: 2m labels: severity: warning @@ -320,7 +320,7 @@ groups: description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostRequiresReboot - expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_reboot_required > 0)' for: 4h labels: severity: info diff --git a/dist/rules/postgresql/postgres-exporter.yml b/dist/rules/postgresql/postgres-exporter.yml index 0e1f4738d..c9a93b1f2 100644 --- a/dist/rules/postgresql/postgres-exporter.yml +++ b/dist/rules/postgresql/postgres-exporter.yml @@ -53,7 +53,7 @@ groups: expr: 'sum by (instance, job, server) (pg_stat_activity_count) > min by (instance, job, server) (pg_settings_max_connections * 0.8)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Postgresql too many connections (instance {{ $labels.instance }}) description: "PostgreSQL instance has too many connections (> 80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" @@ -62,7 +62,7 @@ groups: expr: 'sum by (datname) (pg_stat_activity_count{datname!~"template.*|postgres"}) < 5' for: 2m labels: - severity: warning + severity: critical annotations: summary: Postgresql not enough connections (instance {{ $labels.instance }}) description: "PostgreSQL instance should have more connections (> 5)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" @@ -86,8 +86,8 @@ groups: description: "Ratio of transactions being aborted compared to committed is > 2 %\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlCommitRateLow - expr: 'rate(pg_stat_database_xact_commit[1m]) < 10' - for: 2m + expr: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[1m]) < 5' + for: 5m labels: severity: critical annotations: @@ -140,7 +140,7 @@ groups: description: "PostgreSQL dead tuples is too large\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlConfigurationChanged - expr: '{__name__=~"pg_settings_.*"} != ON(__name__) {__name__=~"pg_settings_([^t]|t[^r]|tr[^a]|tra[^n]|tran[^s]|trans[^a]|transa[^c]|transac[^t]|transact[^i]|transacti[^o]|transactio[^n]|transaction[^_]|transaction_[^r]|transaction_r[^e]|transaction_re[^a]|transaction_rea[^d]|transaction_read[^_]|transaction_read_[^o]|transaction_read_o[^n]|transaction_read_on[^l]|transaction_read_onl[^y]).*"} OFFSET 5m' + expr: 'changes(label_replace({__name__=~"pg_settings_.*"},"name","$1","__name__", "(.+)")[1h:]) > 0' for: 0m labels: severity: info @@ -155,7 +155,7 @@ groups: severity: critical annotations: summary: Postgresql SSL compression active (instance {{ $labels.instance }}) - description: "Database connections with SSL compression enabled. This may add significant jitter in replication delay. 
Replicas should turn off SSL compression via `sslcompression=0` in `recovery.conf`.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Database allows connections with SSL compression enabled.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTooManyLocksAcquired expr: '((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20' diff --git a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml index 9b82a6f42..866d71524 100644 --- a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml +++ b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml @@ -5,32 +5,41 @@ groups: rules: - alert: SmartDeviceTemperatureWarning - expr: 'avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) > 60' + expr: '(avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 60' for: 0m labels: severity: warning annotations: summary: SMART device temperature warning (instance {{ $labels.instance }}) - description: "Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }} over 60°C\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartDeviceTemperatureCritical - expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= 70' + expr: '(max_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 70' for: 0m labels: severity: critical annotations: summary: SMART device temperature critical (instance {{ $labels.instance }}) - description: "Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }} over 70°C\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: SmartDeviceTemperatureWasOverTripValue + - alert: SmartDeviceTemperatureOverTripValue expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"}' for: 0m labels: severity: critical annotations: - summary: SMART device temperature was over trip value (instance {{ $labels.instance }}) + summary: SMART device temperature over trip value (instance {{ $labels.instance }}) description: "Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + - alert: SmartDeviceTemperatureNearingTripValue + expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) (smartctl_device_temperature{temperature_type="drive_trip"} * .80)' + for: 0m + labels: + severity: warning + annotations: + summary: SMART device temperature nearing trip value (instance {{ $labels.instance }}) + description: "Device temperature at 80% of trip value on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + - alert: SmartStatus expr: 'smartctl_device_smart_status != 1' for: 0m From 224e6d00a9a327b4394af19f0ed97ff7cf5a4241 Mon Sep 17 00:00:00 2001 From: Evi 
Vanoost Date: Wed, 6 Mar 2024 11:13:48 -0500 Subject: [PATCH 13/21] Refined some more queries --- .github/workflows/dist.yml | 2 -- _data/rules.yml | 21 ++++++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index e0ba5c8a5..4929280ac 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -9,8 +9,6 @@ on: jobs: publish: name: Publish - # Check if the PR is not from a fork or manually executed - if: ${{ (github.repository_owner == 'samber') || (github.event_name == 'workflow_dispatch') }} runs-on: ubuntu-latest steps: - name: Checkout Repo diff --git a/_data/rules.yml b/_data/rules.yml index 269dde5f5..0601c2b0f 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -126,7 +126,8 @@ groups: severity: critical - name: Prometheus timeseries cardinality description: 'The "{{ $labels.name }}" timeseries cardinality is getting very high: {{ $value }}' - query: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000' + # Node CPU seconds total and Node SystemD Unit State are always high cardinality due to systemd containing services and CPU containing cores + query: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu_seconds_total|node_systemd_unit_state"})) > 10000' severity: warning - name: Host and hardware @@ -147,9 +148,8 @@ groups: - name: Host Memory is underutilized description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})" # We use MemFree, many buffers (ZFS, databases etc) are declared as available memory, but would perform poorly if reduced - query: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80)' + query: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8' severity: info - for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly - name: Host unusual network throughput in @@ -214,16 +214,16 @@ groups: for: 2m - name: Host high CPU load description: CPU load is > 80% - query: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80)' + query: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80' severity: warning for: 10m - name: Host CPU is underutilized description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs." - query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80' + query: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8' severity: info - for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly + for: 1w - name: Host CPU steal noisy neighbor description: CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit. query: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' @@ -255,12 +255,15 @@ groups: severity: warning - name: Host physical component too hot description: "Physical hardware component too hot" - query: 'node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75' + # Some components have different max temperatures (eg. 65 for hard drive sensors, 90-100 for CPU). 
+ # This is defined for all sensors, the crit value may not be defined for everything. + query: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius' severity: warning for: 5m - name: Host node overtemperature alarm description: "Physical node temperature alarm triggered" - query: '(node_hwmon_temp_crit_alarm_celsius == 1)' + # This is a critical alarm, some things (eg. NVMe) have just the temp alarm. + query: 'node_hwmon_temp_crit_alarm_celsius == 1 or node_hwmon_temp_alarm == 1' severity: critical - name: Host Software RAID insufficient drives description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining." @@ -672,7 +675,7 @@ groups: severity: warning - name: Postgresql commit rate low description: Postgresql seems to be processing very few transactions - query: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[1m]) < 5' + query: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[5m]) < 5' severity: critical for: 5m - name: Postgresql low XID consumption From 7e0d0097ce3dc7cbdd2764e63657f1de726863a9 Mon Sep 17 00:00:00 2001 From: samber Date: Wed, 6 Mar 2024 16:15:04 +0000 Subject: [PATCH 14/21] Publish --- dist/rules/host-and-hardware/node-exporter.yml | 12 ++++++------ dist/rules/postgresql/postgres-exporter.yml | 2 +- .../prometheus-self-monitoring/embedded-exporter.yml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index 79506c55b..0ce500284 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -23,8 +23,8 @@ groups: description: "The node is under heavy memory pressure. 
High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryIsUnderutilized - expr: '((avg_over_time(node_memory_MemFree_bytes[30m]) / node_memory_MemTotal_bytes) > .80)' - for: 1w + expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8' + for: 0m labels: severity: info annotations: @@ -122,7 +122,7 @@ groups: description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostHighCpuLoad - expr: '((avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80)' + expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80' for: 10m labels: severity: warning @@ -131,7 +131,7 @@ groups: description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuIsUnderutilized - expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > .80' + expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8' for: 1w labels: severity: info @@ -194,7 +194,7 @@ groups: description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostPhysicalComponentTooHot - expr: 'node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75' + expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius' for: 5m labels: severity: warning @@ -203,7 +203,7 @@ groups: description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNodeOvertemperatureAlarm - expr: '(node_hwmon_temp_crit_alarm_celsius == 1)' + expr: 'node_hwmon_temp_crit_alarm_celsius == 1 or node_hwmon_temp_alarm == 1' for: 0m labels: severity: critical diff --git a/dist/rules/postgresql/postgres-exporter.yml b/dist/rules/postgresql/postgres-exporter.yml index c9a93b1f2..1e96a42d7 100644 --- a/dist/rules/postgresql/postgres-exporter.yml +++ b/dist/rules/postgresql/postgres-exporter.yml @@ -86,7 +86,7 @@ groups: description: "Ratio of transactions being aborted compared to committed is > 2 %\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlCommitRateLow - expr: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[1m]) < 5' + expr: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[5m]) < 5' for: 5m labels: severity: critical diff --git a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml index 65bfd8278..56117b000 100644 --- a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml +++ b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml @@ -248,7 +248,7 @@ groups: description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PrometheusTimeseriesCardinality - expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000' + expr: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu_seconds_total|node_systemd_unit_state"})) > 10000' for: 0m labels: severity: warning From a68beee3c4551c7897c8342c3f401aa8c649f035 Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Wed, 13 Mar 2024 15:53:56 -0400 Subject: [PATCH 15/21] PostgreSQL now has optimized autovacuum behavior --- _data/rules.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/_data/rules.yml b/_data/rules.yml index 0601c2b0f..8192b8777 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -645,13 +645,14 @@ groups: description: Postgresql exporter is showing errors. A query may be buggy in query.yaml query: "pg_exporter_last_scrape_error > 0" severity: critical + # With modern PostgreSQL versions, auto-vacuum doesn't run if it is not required, only after a certain number of tuples have been modified. - name: Postgresql table not auto vacuumed description: Table {{ $labels.relname }} has not been auto vacuumed for 10 days - query: "(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10" + query: "((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_vacuum_threshold) and (time() - pg_stat_user_tables_last_autovacuum) > 864000" severity: warning - name: Postgresql table not auto analyzed description: Table {{ $labels.relname }} has not been auto analyzed for 10 days - query: "(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10" + query: "((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_analyze_threshold) and (time() - pg_stat_user_tables_last_autoanalyze) > 864000" severity: warning - name: Postgresql too many connections description: PostgreSQL instance has too many connections (> 80%). From 8789b86c5945a08af3cd3740a980f7a2308690e2 Mon Sep 17 00:00:00 2001 From: samber Date: Wed, 13 Mar 2024 19:55:14 +0000 Subject: [PATCH 16/21] Publish --- dist/rules/postgresql/postgres-exporter.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dist/rules/postgresql/postgres-exporter.yml b/dist/rules/postgresql/postgres-exporter.yml index 1e96a42d7..48e8f36af 100644 --- a/dist/rules/postgresql/postgres-exporter.yml +++ b/dist/rules/postgresql/postgres-exporter.yml @@ -32,7 +32,7 @@ groups: description: "Postgresql exporter is showing errors. 
A query may be buggy in query.yaml\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTableNotAutoVacuumed - expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10' + expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_vacuum_threshold) and (time() - pg_stat_user_tables_last_autovacuum) > 864000' for: 0m labels: severity: warning @@ -41,7 +41,7 @@ groups: description: "Table {{ $labels.relname }} has not been auto vacuumed for 10 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTableNotAutoAnalyzed - expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10' + expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_analyze_threshold) and (time() - pg_stat_user_tables_last_autoanalyze) > 864000' for: 0m labels: severity: warning From c823aca7c242ab43c2c6b0abcaa1bb58c189aadb Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Thu, 11 Apr 2024 17:35:33 -0400 Subject: [PATCH 17/21] PostgreSQL now has optimized autovacuum behavior --- _data/rules.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/_data/rules.yml b/_data/rules.yml index 4b8616250..341fa81a4 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -126,8 +126,8 @@ groups: severity: critical - name: Prometheus timeseries cardinality description: 'The "{{ $labels.name }}" timeseries cardinality is getting very high: {{ $value }}' - # Node CPU seconds total and Node SystemD Unit State are always high cardinality due to systemd containing services and CPU containing cores - query: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu_seconds_total|node_systemd_unit_state"})) > 10000' + # Node CPU and Node SystemD Unit State are always high cardinality due to systemd containing services and CPU containing cores + query: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu.*|node_systemd_unit_state"})) > 10000' severity: warning - name: Host and hardware From 76a86c3114953a1450fe302e8a31655d1c8c8b1a Mon Sep 17 00:00:00 2001 From: samber Date: Thu, 11 Apr 2024 21:36:45 +0000 Subject: [PATCH 18/21] Publish --- dist/rules/prometheus-self-monitoring/embedded-exporter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml index 56117b000..82348dcd8 100644 --- a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml +++ b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml @@ -248,7 +248,7 @@ groups: description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PrometheusTimeseriesCardinality - expr: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu_seconds_total|node_systemd_unit_state"})) > 10000' + expr: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu.*|node_systemd_unit_state"})) > 10000' for: 0m labels: severity: warning From 
6e48cba2a5f2811fad10de63db9345d4d670632f Mon Sep 17 00:00:00 2001 From: samber Date: Tue, 2 Jul 2024 17:34:29 +0000 Subject: [PATCH 19/21] Publish --- .../rules/host-and-hardware/node-exporter.yml | 146 ++++++++---------- 1 file changed, 64 insertions(+), 82 deletions(-) diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index 0d80c160a..301ae91c8 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -5,7 +5,7 @@ groups: rules: - alert: HostOutOfMemory - expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)' for: 2m labels: severity: warning @@ -14,97 +14,88 @@ groups: description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryUnderMemoryPressure - expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m + expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)' + for: 0m labels: severity: warning annotations: summary: Host memory under memory pressure (instance {{ $labels.instance }}) - description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryIsUnderutilized - expr: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 1w + expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8' + for: 0m labels: severity: info annotations: summary: Host Memory is underutilized (instance {{ $labels.instance }}) - description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputIn - expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput in (instance {{ $labels.instance }}) - description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host receive bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputOut - expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput out (instance {{ $labels.instance }}) - description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadRate - expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual disk read rate (instance {{ $labels.instance }}) - description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - - alert: HostUnusualDiskWriteRate - expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m - labels: - severity: warning - annotations: - summary: Host unusual disk write rate (instance {{ $labels.instance }}) - description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfDiskSpace - expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of disk space (instance {{ $labels.instance }}) description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostDiskWillFillIn24Hours - expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: 
HostDiskMayFillIn24Hours + expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0' for: 2m labels: severity: warning annotations: - summary: Host disk will fill in 24 hours (instance {{ $labels.instance }}) - description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host disk may fill in 24 hours (instance {{ $labels.instance }}) + description: "Filesystem will likely run out of space within the next 24 hours\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfInodes - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of inodes (instance {{ $labels.instance }}) description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostFilesystemDeviceError - expr: 'node_filesystem_device_error == 1' + expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1' for: 2m labels: severity: critical annotations: summary: Host filesystem device error (instance {{ $labels.instance }}) - description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostInodesWillFillIn24Hours - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0' for: 2m labels: severity: warning @@ -113,7 +104,7 @@ groups: description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadLatency - expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -122,7 +113,7 @@ groups: description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskWriteLatency - expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 
'(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -131,7 +122,7 @@ groups: description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostHighCpuLoad - expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80' for: 10m labels: severity: warning @@ -140,16 +131,16 @@ groups: description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuIsUnderutilized - expr: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8' for: 1w labels: severity: info annotations: summary: Host CPU is underutilized (instance {{ $labels.instance }}) - description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuStealNoisyNeighbor - expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' for: 0m labels: severity: warning @@ -158,22 +149,22 @@ groups: description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuHighIowait - expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10' for: 0m labels: severity: warning annotations: summary: Host CPU high iowait (instance {{ $labels.instance }}) - description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskIo - expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8' for: 5m labels: severity: warning annotations: summary: Host unusual disk IO (instance {{ $labels.instance }}) - description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk usage >80%. 
Check storage for issues or increase IOPS capabilities.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostContextSwitchingHigh expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) @@ -188,7 +179,7 @@ groups: description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSwapIsFillingUp - expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)' for: 2m labels: severity: warning @@ -197,7 +188,7 @@ groups: description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSystemdServiceCrashed - expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_systemd_unit_state{state="failed"} == 1)' for: 0m labels: severity: warning @@ -206,7 +197,7 @@ groups: description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostPhysicalComponentTooHot - expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius' for: 5m labels: severity: warning @@ -215,7 +206,7 @@ groups: description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNodeOvertemperatureAlarm - expr: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'node_hwmon_temp_crit_alarm_celsius == 1 or node_hwmon_temp_alarm == 1' for: 0m labels: severity: critical @@ -223,44 +214,44 @@ groups: summary: Host node overtemperature alarm (instance {{ $labels.instance }}) description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidArrayGotInactive - expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidInsufficientDrives + expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)' for: 0m labels: severity: critical annotations: - summary: Host RAID array got inactive (instance {{ $labels.instance }}) - description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID insufficient drives (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidDiskFailure - expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidDiskFailure + expr: '(node_md_disks{state="failed"} > 0)' for: 2m labels: severity: warning annotations: - summary: Host RAID disk failure (instance {{ $labels.instance }}) - description: "At least one device in RAID array on {{ $labels.instance }} failed. 
Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID disk failure (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostKernelVersionDeviations - expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 6h + expr: 'changes(node_uname_info[1h]) > 0' + for: 0m labels: - severity: warning + severity: info annotations: summary: Host kernel version deviations (instance {{ $labels.instance }}) - description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Kernel version for {{ $labels.instance }} has changed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOomKillDetected - expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(increase(node_vmstat_oom_kill[1m]) > 0)' for: 0m labels: - severity: warning + severity: critical annotations: summary: Host OOM kill detected (instance {{ $labels.instance }}) description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacCorrectableErrorsDetected - expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)' for: 0m labels: severity: info @@ -269,7 +260,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacUncorrectableErrorsDetected - expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_edac_uncorrectable_errors_total > 0)' for: 0m labels: severity: warning @@ -278,7 +269,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkReceiveErrors - expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -287,7 +278,7 @@ groups: description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkTransmitErrors - expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -295,17 +286,8 @@ groups: summary: Host Network Transmit Errors (instance {{ $labels.instance }}) description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit 
errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkInterfaceSaturated - expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 1m - labels: - severity: warning - annotations: - summary: Host Network Interface Saturated (instance {{ $labels.instance }}) - description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkBondDegraded - expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_bonding_active - node_bonding_slaves) != 0)' for: 2m labels: severity: warning @@ -314,7 +296,7 @@ groups: description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostConntrackLimit - expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)' for: 5m labels: severity: warning @@ -323,7 +305,7 @@ groups: description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockSkew - expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))' for: 10m labels: severity: warning @@ -332,7 +314,7 @@ groups: description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockNotSynchronising - expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)' for: 2m labels: severity: warning @@ -341,7 +323,7 @@ groups: description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostRequiresReboot - expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_reboot_required > 0)' for: 4h labels: severity: info From 54e2b09b3dff108f2a3741b520a650ce5d0452ce Mon Sep 17 00:00:00 2001 From: Evi Vanoost Date: Tue, 2 Jul 2024 13:49:12 -0400 Subject: [PATCH 20/21] Query fails if instance names are not unique across jobs. This fixes it. 
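The failure comes from PromQL's vector-matching cardinality check: when two scrape jobs reuse the same `instance` value, `up` carries several series per instance while the node_exporter warmup expression yields only one, so the `up` side of the multiplication has to be declared the "many" side. With `group_right(job)` the match direction is inverted, and Prometheus rejects the query as soon as a second job scrapes the same instance. A minimal sketch of the corrected query, using hypothetical series from two jobs (`node` and `mysql`) that share the instance `db1:9100`:

```
# Hypothetical inputs:
#   up{instance="db1:9100", job="node"}  and  up{instance="db1:9100", job="mysql"}
# Only the node exporter exposes node_time_seconds / node_boot_time_seconds,
# so the right-hand side resolves to a single series per instance.
# group_left marks the up vector as the "many" side, letting every down
# target be matched against that one warmup series for its instance.
sum by (instance, job) (
  (up == 0)
  * on (instance) group_left (__name__)
  (node_time_seconds - node_boot_time_seconds > 600)
)
```
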
--- _data/rules.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_data/rules.yml b/_data/rules.yml index b7fd69fb9..0121157ae 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -27,7 +27,7 @@ groups: severity: critical - name: Prometheus target missing with warmup time description: Allow a job time to start up (10 minutes) before alerting that it's down. - query: "sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))" + query: "sum by (instance, job) ((up == 0) * on (instance) group_left (__name__) (node_time_seconds - node_boot_time_seconds > 600))" severity: critical - name: Prometheus configuration reload failure description: Prometheus configuration reload error From 9766507b4705b77b55a5e01e5865ba6bb3d04ca6 Mon Sep 17 00:00:00 2001 From: samber Date: Tue, 2 Jul 2024 17:50:28 +0000 Subject: [PATCH 21/21] Publish --- dist/rules/prometheus-self-monitoring/embedded-exporter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml index 82348dcd8..8a2e40291 100644 --- a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml +++ b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml @@ -32,7 +32,7 @@ groups: description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PrometheusTargetMissingWithWarmupTime - expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))' + expr: 'sum by (instance, job) ((up == 0) * on (instance) group_left (__name__) (node_time_seconds - node_boot_time_seconds > 600))' for: 0m labels: severity: critical
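
Because the publish step regenerates the dist rule files from `_data/rules.yml`, a syntax check of the output is a cheap way to catch matching or quoting mistakes such as this one before they ship. A minimal check, assuming `promtool` (bundled with the Prometheus release) is on the PATH and the command is run from the repository root:

```
# promtool parses every expr in the file and reports PromQL or YAML
# errors without needing a running Prometheus server.
promtool check rules dist/rules/prometheus-self-monitoring/embedded-exporter.yml
```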