diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 1f64e366b..4929280ac 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -1,6 +1,7 @@ name: Publish on: + workflow_dispatch: push: branches: - master @@ -8,8 +9,6 @@ on: jobs: publish: name: Publish - # Check if the PR is not from a fork - if: github.repository_owner == 'samber' runs-on: ubuntu-latest steps: - name: Checkout Repo diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1fcb24bfc..02b8c3865 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,8 +32,8 @@ Or with Docker: docker run --rm -it -p 4000:4000 -v $(pwd):/srv/jekyll jekyll/jekyll jekyll serve ``` -Or with Docker-Compose: +Or with Docker Compose: ``` -docker-compose up -d +docker compose up -d ``` diff --git a/_data/rules.yml b/_data/rules.yml index 9b94c1732..c155775fc 100644 --- a/_data/rules.yml +++ b/_data/rules.yml @@ -27,7 +27,7 @@ groups: severity: critical - name: Prometheus target missing with warmup time description: Allow a job time to start up (10 minutes) before alerting that it's down. - query: "sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))" + query: "sum by (instance, job) ((up == 0) * on (instance) group_left (__name__) (node_time_seconds - node_boot_time_seconds > 600))" severity: critical - name: Prometheus configuration reload failure description: Prometheus configuration reload error @@ -126,7 +126,8 @@ groups: severity: critical - name: Prometheus timeseries cardinality description: 'The "{{ $labels.name }}" timeseries cardinality is getting very high: {{ $value }}' - query: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000' + # Node CPU and Node SystemD Unit State are always high cardinality due to systemd containing services and CPU containing cores + query: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu.*|node_systemd_unit_state"})) > 10000' severity: warning - name: Host and hardware @@ -137,53 +138,45 @@ groups: rules: - name: Host out of memory description: Node memory is filling up (< 10% left) - query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)' severity: warning for: 2m - name: Host memory under memory pressure - description: The node is under heavy memory pressure. High rate of major page faults - query: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: The node is under heavy memory pressure. High rate of loading memory pages from disk. + query: '(rate(node_vmstat_pgmajfault[5m]) > 1000)' severity: warning - for: 2m - name: Host Memory is underutilized - description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})" - query: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})" + # We use MemFree, many buffers (ZFS, databases etc) are declared as available memory, but would perform poorly if reduced + query: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8' severity: info - for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly - name: Host unusual network throughput in - description: Host network interfaces are probably receiving too much data (> 100 MB/s) - query: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Host receive bandwidth is high (>80%)" + query: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' severity: warning - for: 5m - name: Host unusual network throughput out - description: Host network interfaces are probably sending too much data (> 100 MB/s) - query: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Host transmit bandwidth is high (>80%)" + query: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' severity: warning - for: 5m - name: Host unusual disk read rate - description: Disk is probably reading too much data (> 50 MB/s) - query: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning - for: 5m - - name: Host unusual disk write rate - description: Disk is probably writing too much data (> 50 MB/s) - query: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Disk is too busy (IO wait > 80%)" + query: '(rate(node_disk_io_time_seconds_total[5m]) > .80)' severity: warning - for: 2m - name: Host out of disk space - description: Disk is almost full (< 10% left) - query: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + description: "Disk is almost full (< 10% left)" + # Network filesystems have quotas etc. and should not be included in this alert + query: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)' + severity: critical comments: | Please add ignored mountpoints in node_exporter parameters like "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)". Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users. 
for: 2m - - name: Host disk will fill in 24 hours - description: Filesystem is predicted to run out of space within the next 24 hours at current write rate - query: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - name: Host disk may fill in 24 hours + description: Filesystem will likely run out of space within the next 24 hours + query: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0' severity: warning comments: | Please add ignored mountpoints in node_exporter parameters like @@ -192,52 +185,57 @@ groups: for: 2m - name: Host out of inodes description: Disk is almost running out of available inodes (< 10% left) - query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)' + severity: critical for: 2m - name: Host filesystem device error - description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem" - query: "node_filesystem_device_error == 1" + description: "Error stat-ing the {{ $labels.mountpoint }} filesystem" + query: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1' severity: critical + comments: | + This indicates there was a problem getting information for the filesystem via statfs. + This is usually due to permissions issues or virtual filesystems. + Please add ignored mountpoints in node_exporter parameters like + "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)". 
for: 2m - name: Host inodes will fill in 24 hours description: Filesystem is predicted to run out of inodes within the next 24 hours at current write rate - query: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0' severity: warning for: 2m - name: Host unusual disk read latency description: Disk latency is growing (read operations > 100ms) - query: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)' severity: warning for: 2m - name: Host unusual disk write latency description: Disk latency is growing (write operations > 100ms) - query: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)' severity: warning for: 2m - name: Host high CPU load description: CPU load is > 80% - query: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80' severity: warning for: 10m - name: Host CPU is underutilized - description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs." - query: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs." + query: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8' severity: info - for: 1w comments: | You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly + for: 1w - name: Host CPU steal noisy neighbor description: CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit. - query: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' severity: warning - name: Host CPU high iowait - description: CPU iowait > 10%. A high iowait means that you are disk or network bound. - query: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: CPU iowait > 10%. Your CPU is idling waiting for storage to respond. 
+ query: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10' severity: warning - name: Host unusual disk IO - description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues." - query: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities." + query: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8' severity: warning for: 5m - name: Host context switching high @@ -253,86 +251,84 @@ groups: Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58 - name: Host swap is filling up description: Swap is filling up (>80%) - query: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)' severity: warning for: 2m - name: Host systemd service crashed description: "systemd service crashed" - query: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_systemd_unit_state{state="failed"} == 1)' severity: warning - name: Host physical component too hot description: "Physical hardware component too hot" - query: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + # Some components have different max temperatures (eg. 65 for hard drive sensors, 90-100 for CPU). + # This is defined for all sensors, the crit value may not be defined for everything. + query: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius' severity: warning for: 5m - name: Host node overtemperature alarm description: "Physical node temperature alarm triggered" query: '((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' severity: critical - - name: Host RAID array got inactive - description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically." - query: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - name: Host Software RAID insufficient drives + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining." + query: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)' severity: critical - - name: Host RAID disk failure - description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap" - query: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - name: Host Software RAID disk failure + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention." 
+ query: '(node_md_disks{state="failed"} > 0)' severity: warning for: 2m - name: Host kernel version deviations - description: Different kernel versions are running - query: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning - for: 6h + description: Kernel version for {{ $labels.instance }} has changed + query: 'changes(node_uname_info[1h]) > 0' + comments: | + This alert may happen when the host is rebooted after a software update. + severity: info - name: Host OOM kill detected description: OOM kill detected - query: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - severity: warning + query: '(increase(node_vmstat_oom_kill[1m]) > 0)' + severity: critical - name: Host EDAC Correctable Errors detected description: 'Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last 5 minutes.' - query: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(increase(node_edac_correctable_errors_total[1m]) > 0)' severity: info - name: Host EDAC Uncorrectable Errors detected description: 'Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.' - query: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_edac_uncorrectable_errors_total > 0)' severity: warning - name: Host Network Receive Errors description: 'Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' - query: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)' severity: warning for: 2m - name: Host Network Transmit Errors description: 'Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' - query: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)' severity: warning for: 2m - - name: Host Network Interface Saturated - description: 'The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.' - query: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' # < to 10Gb to prevent +inf when max speed is unknown - severity: warning - for: 1m - name: Host Network Bond Degraded description: 'Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}".' 
- query: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_bonding_active - node_bonding_slaves) != 0)' severity: warning for: 2m - name: Host conntrack limit description: "The number of conntrack is approaching limit" - query: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)' severity: warning for: 5m - name: Host clock skew description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host." - query: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))' severity: warning for: 10m - name: Host clock not synchronising description: "Clock not synchronising. Ensure NTP is configured on this host." - query: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)' severity: warning for: 2m - name: Host requires reboot description: "{{ $labels.instance }} requires a reboot." - query: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + query: '(node_reboot_required > 0)' severity: info for: 4h @@ -342,31 +338,42 @@ groups: slug: smartctl-exporter doc_url: https://github.com/prometheus-community/smartctl_exporter rules: - - name: Smart device temperature warning - description: Device temperature warning (instance {{ $labels.instance }}) - query: smartctl_device_temperature > 60 + - name: SMART device temperature warning + description: Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }} over 60°C + query: (avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 60 severity: warning - for: 2m - - name: Smart device temperature critical - description: Device temperature critical (instance {{ $labels.instance }}) - query: smartctl_device_temperature > 80 + - name: SMART device temperature critical + description: Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }} over 70°C + query: (max_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 70 severity: critical - for: 2m - - name: Smart critical warning - description: device has critical warning (instance {{ $labels.instance }}) + # Datacenter drives have a trip temperature + - name: SMART device temperature over trip value + description: Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }} + query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"} + severity: critical
+ - name: SMART device temperature nearing trip value + description: Device temperature at 80% of trip value on {{ $labels.instance }} drive {{ $labels.device }} + query: max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) (smartctl_device_temperature{temperature_type="drive_trip"} * .80) + severity: warning + - name: SMART status + description: Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }} + query: smartctl_device_smart_status != 1 + severity: critical + - name: SMART critical warning + description: Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }} query: smartctl_device_critical_warning > 0 severity: critical - for: 15m - - name: Smart media errors - description: device has media errors (instance {{ $labels.instance }}) + - name: SMART media errors + description: Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }} query: smartctl_device_media_errors > 0 severity: critical - for: 15m - - name: Smart NVME Wearout Indicator - description: NVMe device is wearing out (instance {{ $labels.instance }}) - query: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"} + comments: | + Media errors are a sign of a failing disk. Replace the disk as soon as possible. + - name: SMART Wearout Indicator + description: Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }} + # The threshold is not present on devices that do not support it + query: smartctl_device_available_spare < smartctl_device_available_spare_threshold severity: critical - for: 15m - name: Docker containers exporters: @@ -647,24 +654,27 @@ groups: description: Postgresql exporter is showing errors. A query may be buggy in query.yaml query: "pg_exporter_last_scrape_error > 0" severity: critical + # With modern PostgreSQL versions, auto-vacuum doesn't run if it is not required, only after a certain number of tuples have been modified. - name: Postgresql table not auto vacuumed description: Table {{ $labels.relname }} has not been auto vacuumed for 10 days - query: "(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10" + query: "((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_vacuum_threshold) and (time() - pg_stat_user_tables_last_autovacuum) > 864000" severity: warning - name: Postgresql table not auto analyzed description: Table {{ $labels.relname }} has not been auto analyzed for 10 days - query: "(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10" + query: "((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_analyze_threshold) and (time() - pg_stat_user_tables_last_autoanalyze) > 864000" severity: warning - name: Postgresql too many connections description: PostgreSQL instance has too many connections (> 80%).
query: "sum by (instance, job, server) (pg_stat_activity_count) > min by (instance, job, server) (pg_settings_max_connections * 0.8)" - severity: warning + severity: critical for: 2m - name: Postgresql not enough connections description: PostgreSQL instance should have more connections (> 5) query: 'sum by (datname) (pg_stat_activity_count{datname!~"template.*|postgres"}) < 5' - severity: warning + severity: critical for: 2m + comments: | + If the number of connections is too low, it may indicate that the application has died. - name: Postgresql dead locks description: PostgreSQL has dead-locks query: 'increase(pg_stat_database_deadlocks{datname!~"template.*|postgres"}[1m]) > 5' @@ -675,9 +685,9 @@ groups: severity: warning - name: Postgresql commit rate low description: Postgresql seems to be processing very few transactions - query: "rate(pg_stat_database_xact_commit[1m]) < 10" + query: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[5m]) < 5' severity: critical - for: 2m + for: 5m - name: Postgresql low XID consumption description: Postgresql seems to be consuming transaction IDs very slowly query: "rate(pg_txid_current[1m]) < 5" @@ -703,12 +713,15 @@ groups: for: 2m - name: Postgresql configuration changed description: Postgres Database configuration change has occurred - query: '{__name__=~"pg_settings_.*"} != ON(__name__) {__name__=~"pg_settings_([^t]|t[^r]|tr[^a]|tra[^n]|tran[^s]|trans[^a]|transa[^c]|transac[^t]|transact[^i]|transacti[^o]|transactio[^n]|transaction[^_]|transaction_[^r]|transaction_r[^e]|transaction_re[^a]|transaction_rea[^d]|transaction_read[^_]|transaction_read_[^o]|transaction_read_o[^n]|transaction_read_on[^l]|transaction_read_onl[^y]).*"} OFFSET 5m' + query: 'changes(label_replace({__name__=~"pg_settings_.*"},"name","$1","__name__", "(.+)")[1h:]) > 0' severity: info - name: Postgresql SSL compression active - description: Database connections with SSL compression enabled. This may add significant jitter in replication delay. Replicas should turn off SSL compression via `sslcompression=0` in `recovery.conf`. + description: Database allows connections with SSL compression enabled. query: "sum(pg_stat_ssl_compression) > 0" severity: critical + comments: | + TLS compression is a security risk and should be disabled. It has been removed for TLSv1.3. + https://www.bytebase.com/docs/slow-query/enable-pg-stat-statements-for-postgresql/ - name: Postgresql too many locks acquired description: Too many locks acquired on the database. If this alert happens frequently, we may need to increase the postgres setting max_locks_per_transaction. query: "((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20" @@ -1132,12 +1145,12 @@ groups: description: "The indexing latency on Elasticsearch cluster is higher than the threshold." query: "elasticsearch_indices_indexing_index_time_seconds_total / elasticsearch_indices_indexing_index_total > 0.0005" severity: warning - for: 10m + for: 10m - name: Elasticsearch High Indexing Rate description: "The indexing rate on Elasticsearch cluster is higher than the threshold." query: "sum(rate(elasticsearch_indices_indexing_index_total[1m]))> 10000" severity: warning - for: 5m + for: 5m - name: Elasticsearch High Query Rate description: "The query rate on Elasticsearch cluster is higher than the threshold." 
query: "sum(rate(elasticsearch_indices_search_query_total[1m])) > 100" @@ -1147,14 +1160,14 @@ groups: description: "The query latency on Elasticsearch cluster is higher than the threshold." query: "elasticsearch_indices_search_fetch_time_seconds / elasticsearch_indices_search_fetch_total > 1" severity: warning - for: 5m + for: 5m - name: Meilisearch exporters: - name: Embedded exporter slug: embedded-exporter doc_url: https://github.com/orgs/meilisearch/discussions/625 - rules: + rules: - name: Meilisearch index is empty description: Meilisearch instance is down query: 'meilisearch_index_docs_count == 0' diff --git a/dist/rules/host-and-hardware/node-exporter.yml b/dist/rules/host-and-hardware/node-exporter.yml index 6a465d9fa..89eae2390 100644 --- a/dist/rules/host-and-hardware/node-exporter.yml +++ b/dist/rules/host-and-hardware/node-exporter.yml @@ -5,7 +5,7 @@ groups: rules: - alert: HostOutOfMemory - expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)' for: 2m labels: severity: warning @@ -14,97 +14,88 @@ groups: description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryUnderMemoryPressure - expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m + expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)' + for: 0m labels: severity: warning annotations: summary: Host memory under memory pressure (instance {{ $labels.instance }}) - description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostMemoryIsUnderutilized - expr: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 1w + expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8' + for: 0m labels: severity: info annotations: summary: Host Memory is underutilized (instance {{ $labels.instance }}) - description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. 
(instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputIn - expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput in (instance {{ $labels.instance }}) - description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host receive bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualNetworkThroughputOut - expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual network throughput out (instance {{ $labels.instance }}) - description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadRate - expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 5m + expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)' + for: 0m labels: severity: warning annotations: summary: Host unusual disk read rate (instance {{ $labels.instance }}) - description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - - alert: HostUnusualDiskWriteRate - expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 2m - labels: - severity: warning - annotations: - summary: Host unusual disk write rate (instance {{ $labels.instance }}) - description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfDiskSpace - expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of disk space (instance {{ $labels.instance }}) description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostDiskWillFillIn24Hours - expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: 
HostDiskMayFillIn24Hours + expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0' for: 2m labels: severity: warning annotations: - summary: Host disk will fill in 24 hours (instance {{ $labels.instance }}) - description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host disk may fill in 24 hours (instance {{ $labels.instance }}) + description: "Filesystem will likely run out of space within the next 24 hours\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOutOfInodes - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Host out of inodes (instance {{ $labels.instance }}) description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostFilesystemDeviceError - expr: 'node_filesystem_device_error == 1' + expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1' for: 2m labels: severity: critical annotations: summary: Host filesystem device error (instance {{ $labels.instance }}) - description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostInodesWillFillIn24Hours - expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0' for: 2m labels: severity: warning @@ -113,7 +104,7 @@ groups: description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskReadLatency - expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -122,7 +113,7 @@ groups: description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskWriteLatency - expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 
'(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)' for: 2m labels: severity: warning @@ -131,7 +122,7 @@ groups: description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostHighCpuLoad - expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80' for: 10m labels: severity: warning @@ -140,16 +131,16 @@ groups: description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuIsUnderutilized - expr: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8' for: 1w labels: severity: info annotations: summary: Host CPU is underutilized (instance {{ $labels.instance }}) - description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuStealNoisyNeighbor - expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10' for: 0m labels: severity: warning @@ -158,22 +149,22 @@ groups: description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostCpuHighIowait - expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10' for: 0m labels: severity: warning annotations: summary: Host CPU high iowait (instance {{ $labels.instance }}) - description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostUnusualDiskIo - expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8' for: 5m labels: severity: warning annotations: summary: Host unusual disk IO (instance {{ $labels.instance }}) - description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Disk usage >80%. 
Check storage for issues or increase IOPS capabilities.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostContextSwitchingHigh expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) @@ -188,7 +179,7 @@ groups: description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSwapIsFillingUp - expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)' for: 2m labels: severity: warning @@ -197,7 +188,7 @@ groups: description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostSystemdServiceCrashed - expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_systemd_unit_state{state="failed"} == 1)' for: 0m labels: severity: warning @@ -206,7 +197,7 @@ groups: description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostPhysicalComponentTooHot - expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius' for: 5m labels: severity: warning @@ -223,44 +214,44 @@ groups: summary: Host node overtemperature alarm (instance {{ $labels.instance }}) description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidArrayGotInactive - expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidInsufficientDrives + expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)' for: 0m labels: severity: critical annotations: - summary: Host RAID array got inactive (instance {{ $labels.instance }}) - description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID insufficient drives (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostRaidDiskFailure - expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + - alert: HostSoftwareRaidDiskFailure + expr: '(node_md_disks{state="failed"} > 0)' for: 2m labels: severity: warning annotations: - summary: Host RAID disk failure (instance {{ $labels.instance }}) - description: "At least one device in RAID array on {{ $labels.instance }} failed. 
Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Host Software RAID disk failure (instance {{ $labels.instance }}) + description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostKernelVersionDeviations - expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 6h + expr: 'changes(node_uname_info[1h]) > 0' + for: 0m labels: - severity: warning + severity: info annotations: summary: Host kernel version deviations (instance {{ $labels.instance }}) - description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Kernel version for {{ $labels.instance }} has changed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostOomKillDetected - expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(increase(node_vmstat_oom_kill[1m]) > 0)' for: 0m labels: - severity: warning + severity: critical annotations: summary: Host OOM kill detected (instance {{ $labels.instance }}) description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacCorrectableErrorsDetected - expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)' for: 0m labels: severity: info @@ -269,7 +260,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostEdacUncorrectableErrorsDetected - expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_edac_uncorrectable_errors_total > 0)' for: 0m labels: severity: warning @@ -278,7 +269,7 @@ groups: description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkReceiveErrors - expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -287,7 +278,7 @@ groups: description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostNetworkTransmitErrors - expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)' for: 2m labels: severity: warning @@ -295,17 +286,8 @@ groups: summary: Host Network Transmit Errors (instance {{ $labels.instance }}) description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit 
errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkInterfaceSaturated - expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' - for: 1m - labels: - severity: warning - annotations: - summary: Host Network Interface Saturated (instance {{ $labels.instance }}) - description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: HostNetworkBondDegraded - expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_bonding_active - node_bonding_slaves) != 0)' for: 2m labels: severity: warning @@ -314,7 +296,7 @@ groups: description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostConntrackLimit - expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)' for: 5m labels: severity: warning @@ -323,7 +305,7 @@ groups: description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockSkew - expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))' for: 10m labels: severity: warning @@ -332,7 +314,7 @@ groups: description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostClockNotSynchronising - expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)' for: 2m labels: severity: warning @@ -341,7 +323,7 @@ groups: description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: HostRequiresReboot - expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}' + expr: '(node_reboot_required > 0)' for: 4h labels: severity: info diff --git a/dist/rules/postgresql/postgres-exporter.yml b/dist/rules/postgresql/postgres-exporter.yml index 2ab461fb6..5cc6e0305 100644 --- a/dist/rules/postgresql/postgres-exporter.yml +++ b/dist/rules/postgresql/postgres-exporter.yml @@ -32,7 +32,7 @@ groups: description: "Postgresql exporter is showing errors. 
A query may be buggy in query.yaml\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTableNotAutoVacuumed - expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10' + expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_vacuum_threshold) and (time() - pg_stat_user_tables_last_autovacuum) > 864000' for: 0m labels: severity: warning @@ -41,7 +41,7 @@ groups: description: "Table {{ $labels.relname }} has not been auto vacuumed for 10 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTableNotAutoAnalyzed - expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10' + expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_analyze_threshold) and (time() - pg_stat_user_tables_last_autoanalyze) > 864000' for: 0m labels: severity: warning @@ -53,7 +53,7 @@ groups: expr: 'sum by (instance, job, server) (pg_stat_activity_count) > min by (instance, job, server) (pg_settings_max_connections * 0.8)' for: 2m labels: - severity: warning + severity: critical annotations: summary: Postgresql too many connections (instance {{ $labels.instance }}) description: "PostgreSQL instance has too many connections (> 80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" @@ -62,7 +62,7 @@ groups: expr: 'sum by (datname) (pg_stat_activity_count{datname!~"template.*|postgres"}) < 5' for: 2m labels: - severity: warning + severity: critical annotations: summary: Postgresql not enough connections (instance {{ $labels.instance }}) description: "PostgreSQL instance should have more connections (> 5)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" @@ -86,8 +86,8 @@ groups: description: "Ratio of transactions being aborted compared to committed is > 2 %\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlCommitRateLow - expr: 'rate(pg_stat_database_xact_commit[1m]) < 10' - for: 2m + expr: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[5m]) < 5' + for: 5m labels: severity: critical annotations: @@ -140,7 +140,7 @@ groups: description: "PostgreSQL dead tuples is too large\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlConfigurationChanged - expr: '{__name__=~"pg_settings_.*"} != ON(__name__) {__name__=~"pg_settings_([^t]|t[^r]|tr[^a]|tra[^n]|tran[^s]|trans[^a]|transa[^c]|transac[^t]|transact[^i]|transacti[^o]|transactio[^n]|transaction[^_]|transaction_[^r]|transaction_r[^e]|transaction_re[^a]|transaction_rea[^d]|transaction_read[^_]|transaction_read_[^o]|transaction_read_o[^n]|transaction_read_on[^l]|transaction_read_onl[^y]).*"} OFFSET 5m' + expr: 'changes(label_replace({__name__=~"pg_settings_.*"},"name","$1","__name__", "(.+)")[1h:]) > 0' for: 0m labels: severity: info @@ -155,7 +155,7 @@ groups: severity: critical annotations: summary: Postgresql SSL compression active (instance {{ $labels.instance }}) - description: "Database connections with SSL compression enabled. This may add significant jitter in replication delay. 
Replicas should turn off SSL compression via `sslcompression=0` in `recovery.conf`.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Database allows connections with SSL compression enabled.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PostgresqlTooManyLocksAcquired expr: '((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20' diff --git a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml index 65bfd8278..8a2e40291 100644 --- a/dist/rules/prometheus-self-monitoring/embedded-exporter.yml +++ b/dist/rules/prometheus-self-monitoring/embedded-exporter.yml @@ -32,7 +32,7 @@ groups: description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PrometheusTargetMissingWithWarmupTime - expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))' + expr: 'sum by (instance, job) ((up == 0) * on (instance) group_left (__name__) (node_time_seconds - node_boot_time_seconds > 600))' for: 0m labels: severity: critical @@ -248,7 +248,7 @@ groups: description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: PrometheusTimeseriesCardinality - expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000' + expr: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu.*|node_systemd_unit_state"})) > 10000' for: 0m labels: severity: warning diff --git a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml index 1946c38e1..866d71524 100644 --- a/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml +++ b/dist/rules/s.m.a.r.t-device-monitoring/smartctl-exporter.yml @@ -5,46 +5,73 @@ groups: rules: - alert: SmartDeviceTemperatureWarning - expr: 'smartctl_device_temperature > 60' - for: 2m + expr: '(avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 60' + for: 0m labels: severity: warning annotations: - summary: Smart device temperature warning (instance {{ $labels.instance }}) - description: "Device temperature warning (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART device temperature warning (instance {{ $labels.instance }}) + description: "Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }} over 60°C\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartDeviceTemperatureCritical - expr: 'smartctl_device_temperature > 80' - for: 2m + expr: '(max_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 70' + for: 0m labels: severity: critical annotations: - summary: Smart device temperature critical (instance {{ $labels.instance }}) - description: "Device temperature critical (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART device temperature critical (instance {{ $labels.instance }}) + description: "Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }} over 70°C\n VALUE = {{ $value }}\n 
LABELS = {{ $labels }}" + + - alert: SmartDeviceTemperatureOverTripValue + expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"}' + for: 0m + labels: + severity: critical + annotations: + summary: SMART device temperature over trip value (instance {{ $labels.instance }}) + description: "Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: SmartDeviceTemperatureNearingTripValue + expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) (smartctl_device_temperature{temperature_type="drive_trip"} * .80)' + for: 0m + labels: + severity: warning + annotations: + summary: SMART device temperature nearing trip value (instance {{ $labels.instance }}) + description: "Device temperature at 80% of trip value on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: SmartStatus + expr: 'smartctl_device_smart_status != 1' + for: 0m + labels: + severity: critical + annotations: + summary: SMART status (instance {{ $labels.instance }}) + description: "Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartCriticalWarning expr: 'smartctl_device_critical_warning > 0' - for: 15m + for: 0m labels: severity: critical annotations: - summary: Smart critical warning (instance {{ $labels.instance }}) - description: "device has critical warning (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART critical warning (instance {{ $labels.instance }}) + description: "Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: SmartMediaErrors expr: 'smartctl_device_media_errors > 0' - for: 15m + for: 0m labels: severity: critical annotations: - summary: Smart media errors (instance {{ $labels.instance }}) - description: "device has media errors (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART media errors (instance {{ $labels.instance }}) + description: "Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - alert: SmartNvmeWearoutIndicator - expr: 'smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"}' - for: 15m + - alert: SmartWearoutIndicator + expr: 'smartctl_device_available_spare < smartctl_device_available_spare_threshold' + for: 0m labels: severity: critical annotations: - summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }}) - description: "NVMe device is wearing out (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: SMART Wearout Indicator (instance {{ $labels.instance }}) + description: "Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
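
Before regenerating and publishing the dist files, the reworked expressions above can be sanity-checked with `promtool test rules`. Below is a minimal sketch for the new HostOutOfMemory expression; the test file name and the synthetic series values are hypothetical, while the rule path matches the dist file touched by this diff:

```
# test/node-exporter_test.yml (hypothetical path)
rule_files:
  - dist/rules/host-and-hardware/node-exporter.yml

evaluation_interval: 1m

tests:
  - interval: 1m
    # Synthetic series: only the available/total ratio matters (5% available here).
    input_series:
      - series: 'node_memory_MemAvailable_bytes{instance="host1"}'
        values: '5+0x10'
      - series: 'node_memory_MemTotal_bytes{instance="host1"}'
        values: '100+0x10'
    promql_expr_test:
      # The rewritten HostOutOfMemory expression from this diff
      - expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)
        eval_time: 5m
        exp_samples:
          - labels: '{instance="host1"}'
            value: 0.05
```

Running `promtool test rules test/node-exporter_test.yml` loads the dist rule file and evaluates the expression against the synthetic data; alert-level checks via `alert_rule_test` would additionally require the exact label and annotation strings produced by the rule.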