commit 2a2024e28229fd06c9321fa4d51d3d6700cbccb3
parent c94bef8ad32ec8cf49bb35388863dec4c43f1d2e
Author: Devan Carpenter <devan@taler.net>
Date: Fri, 18 Apr 2025 01:42:50 -0500
monitoring: alertmanager setup and configuration
Diffstat:
5 files changed, 444 insertions(+), 0 deletions(-)
diff --git a/roles/monitoring/files/etc/prometheus/node-exporter-rules.yml b/roles/monitoring/files/etc/prometheus/node-exporter-rules.yml
@@ -0,0 +1,329 @@
+groups:
+
+- name: NodeExporter
+
+ rules:
+
+ - alert: HostOutOfMemory
+ expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host out of memory (instance {{ $labels.instance }})
+ description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostMemoryUnderMemoryPressure
+ expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host memory under memory pressure (instance {{ $labels.instance }})
+ description: "The node is under heavy memory pressure. High rate of loading memory pages from disk.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostMemoryIsUnderutilized
+ expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8'
+ for: 0m
+ labels:
+ severity: info
+ annotations:
+ summary: Host Memory is underutilized (instance {{ $labels.instance }})
+ description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputIn
+ expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual network throughput in (instance {{ $labels.instance }})
+ description: "Host receive bandwidth is high (>80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputOut
+ expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual network throughput out (instance {{ $labels.instance }})
+ description: "Host transmit bandwidth is high (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualDiskReadRate
+ expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk read rate (instance {{ $labels.instance }})
+ description: "Disk is too busy (IO wait > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostOutOfDiskSpace
+ expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)'
+ for: 2m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host out of disk space (instance {{ $labels.instance }})
+ description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostDiskMayFillIn24Hours
+ expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host disk may fill in 24 hours (instance {{ $labels.instance }})
+ description: "Filesystem will likely run out of space within the next 24 hours.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostOutOfInodes
+ expr: '(node_filesystem_files_free / node_filesystem_files < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)'
+ for: 2m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host out of inodes (instance {{ $labels.instance }})
+ description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostFilesystemDeviceError
+ expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1'
+ for: 2m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host filesystem device error (instance {{ $labels.instance }})
+ description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostInodesMayFillIn24Hours
+ expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host inodes may fill in 24 hours (instance {{ $labels.instance }})
+ description: "Filesystem will likely run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualDiskReadLatency
+ expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk read latency (instance {{ $labels.instance }})
+ description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualDiskWriteLatency
+ expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk write latency (instance {{ $labels.instance }})
+ description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostHighCpuLoad
+ expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80'
+ for: 10m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host high CPU load (instance {{ $labels.instance }})
+ description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostCpuIsUnderutilized
+ expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8'
+ for: 1w
+ labels:
+ severity: info
+ annotations:
+ summary: Host CPU is underutilized (instance {{ $labels.instance }})
+ description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostCpuStealNoisyNeighbor
+ expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
+ description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostCpuHighIowait
+ expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host CPU high iowait (instance {{ $labels.instance }})
+ description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostUnusualDiskIo
+ expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8'
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk IO (instance {{ $labels.instance }})
+ description: "Disk usage >80%. Check storage for issues or increase IOPS capabilities. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostContextSwitchingHigh
+ expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) / (rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host context switching high (instance {{ $labels.instance }})
+ description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostSwapIsFillingUp
+ expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host swap is filling up (instance {{ $labels.instance }})
+ description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostSystemdServiceCrashed
+ expr: '(node_systemd_unit_state{state="failed"} == 1)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host systemd service crashed (instance {{ $labels.instance }})
+ description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostPhysicalComponentTooHot
+ expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius'
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host physical component too hot (instance {{ $labels.instance }})
+ description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostNodeOvertemperatureAlarm
+ expr: '((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1))'
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host node overtemperature alarm (instance {{ $labels.instance }})
+ description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostSoftwareRaidInsufficientDrives
+ expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)'
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host software RAID insufficient drives (instance {{ $labels.instance }})
+ description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostSoftwareRaidDiskFailure
+ expr: '(node_md_disks{state="failed"} > 0)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host software RAID disk failure (instance {{ $labels.instance }})
+ description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostKernelVersionDeviations
+ expr: 'changes(node_uname_info[1h]) > 0'
+ for: 0m
+ labels:
+ severity: info
+ annotations:
+ summary: Host kernel version deviations (instance {{ $labels.instance }})
+ description: "Kernel version for {{ $labels.instance }} has changed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostOomKillDetected
+ expr: '(increase(node_vmstat_oom_kill[1m]) > 0)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host OOM kill detected (instance {{ $labels.instance }})
+ description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostEdacCorrectableErrorsDetected
+ expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)'
+ for: 0m
+ labels:
+ severity: info
+ annotations:
+ summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
+ description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostEdacUncorrectableErrorsDetected
+ expr: '(node_edac_uncorrectable_errors_total > 0)'
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
+ description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostNetworkReceiveErrors
+ expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Receive Errors (instance {{ $labels.instance }})
+ description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostNetworkTransmitErrors
+ expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Transmit Errors (instance {{ $labels.instance }})
+ description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostNetworkBondDegraded
+ expr: '((node_bonding_active - node_bonding_slaves) != 0)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Bond Degraded (instance {{ $labels.instance }})
+ description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostConntrackLimit
+ expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)'
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host conntrack limit (instance {{ $labels.instance }})
+ description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostClockSkew
+ expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))'
+ for: 10m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host clock skew (instance {{ $labels.instance }})
+ description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostClockNotSynchronising
+ expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)'
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host clock not synchronising (instance {{ $labels.instance }})
+ description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: HostRequiresReboot
+ expr: '(node_reboot_required > 0)'
+ for: 4h
+ labels:
+ severity: info
+ annotations:
+ summary: Host requires reboot (instance {{ $labels.instance }})
+ description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/roles/monitoring/files/etc/prometheus/prometheus.yml b/roles/monitoring/files/etc/prometheus/prometheus.yml
@@ -14,6 +14,7 @@ alerting:
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "alert_rules.yml"
+ - "node-exporter-rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
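Loading the rule file only makes Prometheus evaluate and fire these alerts; they reach Alertmanager through the alerting: block whose context line appears in the hunk header above. That block is not shown in this diff, but a typical stanza pointing at the local Alertmanager would look roughly like this (a sketch, not part of this commit):

    alerting:
      alertmanagers:
        - static_configs:
            - targets: ['localhost:9093']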
diff --git a/roles/monitoring/tasks/main.yml b/roles/monitoring/tasks/main.yml
@@ -55,6 +55,7 @@
- prometheus-nginx-exporter
- prometheus-node-exporter
- prometheus-postgres-exporter
+ - prometheus-alertmanager
- prometheus
- alloy
install_recommends: false
@@ -210,6 +211,22 @@
group: root
mode: "0644"
+- name: Configure prometheus alertmanager
+ template:
+ src: templates/etc/prometheus/alertmanager.yml
+ dest: /etc/prometheus/alertmanager.yml
+ owner: root
+ group: root
+ mode: "0644"
+
+- name: Configure node-exporter alerting rules for prometheus
+ copy:
+ src: etc/prometheus/node-exporter-rules.yml
+ dest: /etc/prometheus/node-exporter-rules.yml
+ owner: root
+ group: root
+ mode: "0644"
+
- name: Ensure exporter services are enabled and started
service:
name: "{{ item }}"
@@ -219,5 +236,6 @@
- prometheus-node-exporter.service
- prometheus-nginx-exporter.service
- prometheus-postgres-exporter.service
+ - prometheus-alertmanager.service
- prometheus.service
- alloy.service
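Note that neither new task triggers a restart when alertmanager.yml or the rules file changes on an already-provisioned host; the service task only ensures the daemons are started. A hedged sketch of the usual notify/handler pattern that could be added (the handler name is hypothetical, not part of this role):

    - name: Configure prometheus alertmanager
      template:
        src: templates/etc/prometheus/alertmanager.yml
        dest: /etc/prometheus/alertmanager.yml
        owner: root
        group: root
        mode: "0644"
      notify: Restart alertmanager

    # handlers/main.yml
    - name: Restart alertmanager
      service:
        name: prometheus-alertmanager.service
        state: restarted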
diff --git a/roles/monitoring/templates/etc/nginx/sites-available/monitoring-nginx.conf.j2 b/roles/monitoring/templates/etc/nginx/sites-available/monitoring-nginx.conf.j2
@@ -74,6 +74,14 @@ server {
rewrite ^/alloy/(.*)$ /$1 break;
proxy_pass http://127.0.0.1:12345/;
}
+
+ location /alertmanager/ {
+ if ($http_authorization != "Bearer {{ PROMETHEUS_ACCESS_TOKEN }}") {
+ return 401;
+ }
+ rewrite ^/alertmanager/(.*)$ /$1 break;
+ proxy_pass http://127.0.0.1:9093/;
+ }
}
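With this location block, the Alertmanager UI and API are only reachable with the shared bearer token. A quick smoke test could look like this (a sketch; the hostname is a placeholder and the token is whatever PROMETHEUS_ACCESS_TOKEN expands to):

    curl -H "Authorization: Bearer $PROMETHEUS_ACCESS_TOKEN" \
         https://monitoring.example.net/alertmanager/api/v2/status

A 401 without the header and a JSON status document with it confirm that both the rewrite and the proxy_pass are working.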
diff --git a/roles/monitoring/templates/etc/prometheus/alertmanager.yml b/roles/monitoring/templates/etc/prometheus/alertmanager.yml
@@ -0,0 +1,88 @@
+# Alertmanager configuration (based on the upstream sample).
+# See https://prometheus.io/docs/alerting/configuration/ for documentation.
+
+global:
+ # The smarthost and SMTP sender used for mail notifications.
+ smtp_smarthost: 'localhost:25'
+ smtp_from: 'alertmanager@taler.net'
+ smtp_require_tls: false
+ #smtp_auth_username: 'alertmanager'
+ #smtp_auth_password: 'password'
+
+# The directory from which notification templates are read.
+templates:
+- '/etc/prometheus/alertmanager_templates/*.tmpl'
+
+# The root route on which each incoming alert enters.
+route:
+ # The labels by which incoming alerts are grouped together. For example,
+ # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+ # be batched into a single group.
+ group_by: ['alertname', 'cluster', 'service']
+
+ # When a new group of alerts is created by an incoming alert, wait at
+ # least 'group_wait' to send the initial notification.
+ # This ensures that multiple alerts for the same group that start firing
+ # shortly after one another are batched together in the first
+ # notification.
+ group_wait: 30s
+
+ # When the first notification was sent, wait 'group_interval' to send a batch
+ # of new alerts that started firing for that group.
+ group_interval: 5m
+
+ # Once a notification has been sent successfully, wait 'repeat_interval'
+ # before resending it.
+ repeat_interval: 12h
+
+ # A default receiver
+ receiver: taler-warning-mails
+
+ # All the above attributes are inherited by all child routes and can
+ # be overwritten on each.
+
+ # The child route trees.
+ routes:
+ - matchers:
+ - severity = critical
+ receiver: taler-admin-mails
+
+
+# Inhibition rules allow muting a set of alerts when another alert is
+# firing.
+# We use this to mute any warning-level notifications if the same alert is
+# already critical.
+inhibit_rules:
+- source_match:
+ severity: 'critical'
+ target_match:
+ severity: 'warning'
+ # Apply inhibition only when alertname, cluster and service all match.
+ equal: ['alertname', 'cluster', 'service']
+
+
+receivers:
+- name: 'taler-warning-mails'
+ email_configs:
+ - to: 'warnings@taler.net'
+- name: 'taler-admin-mails'
+ email_configs:
+ - to: 'sysadmin@taler.net'
+ #
+ #- name: 'team-X-pager'
+ # email_configs:
+ # - to: 'team-X+alerts-critical@example.org'
+ # pagerduty_configs:
+ # - service_key: <team-X-key>
+ #
+ #- name: 'team-Y-mails'
+ # email_configs:
+ # - to: 'team-Y+alerts@example.org'
+ #
+ #- name: 'team-Y-pager'
+ # pagerduty_configs:
+ # - service_key: <team-Y-key>
+ #
+ #- name: 'team-DB-pager'
+ # pagerduty_configs:
+ # - service_key: <team-DB-key>
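As with the Prometheus rules, this file can be validated before the service is (re)started, and the routing tree can be exercised offline. A sketch, assuming the amtool binary shipped with prometheus-alertmanager is available on the host:

    amtool check-config /etc/prometheus/alertmanager.yml
    amtool config routes test --config.file=/etc/prometheus/alertmanager.yml severity=critical

With the routes defined above, the second command should print taler-admin-mails, while any other severity falls through to the default taler-warning-mails receiver.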