From c98d2fe2890b6742423a7bcfd0c29d0216308b6d Mon Sep 17 00:00:00 2001
From: xinyangli
Date: Tue, 30 Jul 2024 16:01:17 +0800
Subject: [PATCH 1/5] refactor: move auth settings inside machine
 configuration to modules

---
 machines/dolomite/default.nix | 33 ++++++---------------------------
 machines/massicot/default.nix | 33 +++++----------------------------
 2 files changed, 11 insertions(+), 55 deletions(-)

diff --git a/machines/dolomite/default.nix b/machines/dolomite/default.nix
index 3965655..3195b58 100644
--- a/machines/dolomite/default.nix
+++ b/machines/dolomite/default.nix
@@ -14,6 +14,12 @@ in
   config = {
     isBandwagon = builtins.elem config.networking.hostName bwgHosts;
     isLightsail = builtins.elem config.networking.hostName awsHosts;
+
+    commonSettings = {
+      auth.enable = true;
+      nix.enable = true;
+    };
+
     sops = {
       secrets = {
         wg_private_key = {
@@ -52,33 +58,6 @@ in
       };
     };
 
-    custom.kanidm-client = {
-      enable = true;
-      uri = "https://auth.xinyang.life/";
-      asSSHAuth = {
-        enable = true;
-        allowedGroups = [ "linux_users" ];
-      };
-      sudoers = [ "xin@auth.xinyang.life" ];
-    };
-
-    services.openssh = {
-      settings = {
-        PasswordAuthentication = false;
-        KbdInteractiveAuthentication = false;
-        PermitRootLogin = lib.mkForce "no";
-        GSSAPIAuthentication = "no";
-        KerberosAuthentication = "no";
-      };
-    };
-    services.fail2ban.enable = true;
-    programs.mosh.enable = true;
-
-    security.sudo = {
-      execWheelOnly = true;
-      wheelNeedsPassword = false;
-    };
-
     services.sing-box = let
       singTls = {
         enabled = true;
diff --git a/machines/massicot/default.nix b/machines/massicot/default.nix
index 56cbfe5..7656e12 100644
--- a/machines/massicot/default.nix
+++ b/machines/massicot/default.nix
@@ -7,6 +7,11 @@
     ./networking.nix
     ./services.nix
   ];
+
+  commonSettings = {
+    auth.enable = true;
+    nix.enable = true;
+  };
 
   sops = {
     defaultSopsFile = ./secrets.yaml;
@@ -52,34 +57,6 @@
   networking = {
     hostName = "massicot";
   };
-
-  custom.kanidm-client = {
-    enable = true;
-    uri = "https://auth.xinyang.life/";
-    asSSHAuth = {
-      enable = true;
-      allowedGroups = [ "linux_users" ];
-    };
-    sudoers = [ "xin@auth.xinyang.life" ];
-  };
-
-  security.sudo = {
-    execWheelOnly = true;
-    wheelNeedsPassword = false;
-  };
-
-  services.openssh = {
-    enable = true;
-    settings = {
-      PasswordAuthentication = false;
-      KbdInteractiveAuthentication = false;
-      PermitRootLogin = "no";
-      GSSAPIAuthentication = "no";
-      KerberosAuthentication = "no";
-    };
-  };
-  services.fail2ban.enable = true;
-  programs.mosh.enable = true;
 
   systemd.services.sshd.wantedBy = pkgs.lib.mkForce [ "multi-user.target" ];
 }

From c88f86eec29d6672f20697db6740328d1f863b2e Mon Sep 17 00:00:00 2001
From: xinyangli
Date: Tue, 30 Jul 2024 17:03:27 +0800
Subject: [PATCH 2/5] feat: enable tailscale on servers

---
 machines/calcite/configuration.nix |  6 ++-
 machines/dolomite/default.nix      | 35 ++++------------------
 machines/massicot/default.nix      | 33 +++------------------
 modules/nixos/prometheus.nix       | 62 +++++++++++++++++++++++++++++-
 4 files changed, 79 insertions(+), 57 deletions(-)

diff --git a/machines/calcite/configuration.nix b/machines/calcite/configuration.nix
index 4b35351..d42c585 100644
--- a/machines/calcite/configuration.nix
+++ b/machines/calcite/configuration.nix
@@ -208,7 +208,6 @@
     element-desktop
     tdesktop
     qq
-    feishu
 
     # Password manager
     bitwarden
@@ -265,6 +264,11 @@
   custom.forgejo-actions-runner.enable = true;
   custom.forgejo-actions-runner.tokenFile = config.sops.secrets.gitea_env.path;
 
+  custom.prometheus = {
+    enable = true;
+    exporters.enable = true;
+  };
+
   # MTP support
   services.gvfs.enable = true;
diff --git a/machines/dolomite/default.nix b/machines/dolomite/default.nix
index 3965655..5bf2979 100644
--- a/machines/dolomite/default.nix
+++ b/machines/dolomite/default.nix
@@ -14,6 +14,12 @@ in
   config = {
     isBandwagon = builtins.elem config.networking.hostName bwgHosts;
     isLightsail = builtins.elem config.networking.hostName awsHosts;
+
+    commonSettings = {
+      auth.enable = true;
+      nix.enable = true;
+    };
+
     sops = {
       secrets = {
         wg_private_key = {
@@ -43,6 +49,8 @@ in
     networking.firewall.allowedTCPPorts = [ 80 8080 ];
     networking.firewall.allowedUDPPorts = [ ] ++ (lib.range 6311 6314);
 
+    services.tailscale.enable = true;
+
     custom.prometheus = {
       enable = false;
       exporters.enable = false;
@@ -52,33 +60,6 @@ in
       };
     };
 
-    custom.kanidm-client = {
-      enable = true;
-      uri = "https://auth.xinyang.life/";
-      asSSHAuth = {
-        enable = true;
-        allowedGroups = [ "linux_users" ];
-      };
-      sudoers = [ "xin@auth.xinyang.life" ];
-    };
-
-    services.openssh = {
-      settings = {
-        PasswordAuthentication = false;
-        KbdInteractiveAuthentication = false;
-        PermitRootLogin = lib.mkForce "no";
-        GSSAPIAuthentication = "no";
-        KerberosAuthentication = "no";
-      };
-    };
-    services.fail2ban.enable = true;
-    programs.mosh.enable = true;
-
-    security.sudo = {
-      execWheelOnly = true;
-      wheelNeedsPassword = false;
-    };
-
     services.sing-box = let
       singTls = {
         enabled = true;
diff --git a/machines/massicot/default.nix b/machines/massicot/default.nix
index 56cbfe5..5ac8151 100644
--- a/machines/massicot/default.nix
+++ b/machines/massicot/default.nix
@@ -7,6 +7,11 @@
     ./networking.nix
     ./services.nix
   ];
+
+  commonSettings = {
+    auth.enable = true;
+    nix.enable = true;
+  };
 
   sops = {
     defaultSopsFile = ./secrets.yaml;
@@ -52,34 +57,6 @@
   networking = {
     hostName = "massicot";
   };
-
-  custom.kanidm-client = {
-    enable = true;
-    uri = "https://auth.xinyang.life/";
-    asSSHAuth = {
-      enable = true;
-      allowedGroups = [ "linux_users" ];
-    };
-    sudoers = [ "xin@auth.xinyang.life" ];
-  };
-
-  security.sudo = {
-    execWheelOnly = true;
-    wheelNeedsPassword = false;
-  };
-
-  services.openssh = {
-    enable = true;
-    settings = {
-      PasswordAuthentication = false;
-      KbdInteractiveAuthentication = false;
-      PermitRootLogin = "no";
-      GSSAPIAuthentication = "no";
-      KerberosAuthentication = "no";
-    };
-  };
-  services.fail2ban.enable = true;
-  programs.mosh.enable = true;
 
   systemd.services.sshd.wantedBy = pkgs.lib.mkForce [ "multi-user.target" ];
 }
diff --git a/modules/nixos/prometheus.nix b/modules/nixos/prometheus.nix
index 40035f3..9d27980 100644
--- a/modules/nixos/prometheus.nix
+++ b/modules/nixos/prometheus.nix
@@ -26,6 +26,18 @@ in
   };
 
   config = mkIf cfg.enable (mkMerge [{
+    services.tailscale = {
+      enable = true;
+      permitCertUid = config.services.caddy.user;
+    };
+
+    services.caddy = {
+      enable = true;
+      virtualHosts."${config.networking.hostName}.coho-tet.ts.net".extraConfig = ''
+        reverse_proxy 127.0.0.1:${toString config.services.prometheus.port}
+      '';
+    };
+
     services.caddy.globalConfig = ''
       servers {
         metrics
@@ -103,7 +115,7 @@ in
           name = "ntfy";
           webhook_configs = [
             {
-              url = "${config.services.ntfy-sh.settings.base-url}/prometheus-alerts";
+              url = "https://ntfy.xinyang.life/prometheus-alerts";
               send_resolved = true;
             }
           ];
@@ -162,7 +174,55 @@ in
       ''
         groups:
        - name: restic_alerts
+          rules:
+          - alert: ResticCheckFailed
+            expr: restic_check_success == 0
+            for: 5m
+            labels:
+              severity: critical
+            annotations:
+              summary: Restic check failed (instance {{ $labels.instance }})
+              description: Restic check failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
+
+          - alert: ResticOutdatedBackup
+            # 518400 = 6 days
+            expr: time() - restic_backup_timestamp > 518400
+            for: 0m
+            labels:
+              severity: critical
+            annotations:
+              summary: Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated
+              description: Restic backup is outdated\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
       '' else "")
+      (if config.services.caddy.enable then ''
+        groups:
+        - name: caddy_alerts
+          rules:
+          - alert: HighHttpErrorRate
+            expr: rate(caddy_http_request_duration_seconds_count{status_code=~"5.."}[5m]) / rate(caddy_http_request_duration_seconds_count[5m]) > 0.01
+            for: 10m
+            labels:
+              severity: critical
+            annotations:
+              summary: "High error rate on {{ $labels.instance }}"
+              description: "More than 1% of HTTP requests are errors over the last 10 minutes."
+          - alert: CaddyDown
+            expr: up{job="caddy"} == 0
+            for: 5m
+            labels:
+              severity: critical
+            annotations:
+              summary: "Caddy server down on {{ $labels.instance }}"
+              description: "Caddy server is down for more than 5 minutes."
+          - alert: HighRequestLatency
+            expr: histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5
+            for: 2m
+            labels:
+              severity: warning
+            annotations:
+              summary: "High request latency on {{ $labels.instance }}"
+              description: "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."
+      '' else "")
     ];
   };
 }

From 013d87afdffb5e068dc21ddd3123d558f915749c Mon Sep 17 00:00:00 2001
From: xinyangli
Date: Tue, 30 Jul 2024 20:09:41 +0800
Subject: [PATCH 3/5] fix

---
 modules/nixos/prometheus.nix | 380 +++++++++++++++++------------------
 1 file changed, 187 insertions(+), 193 deletions(-)

diff --git a/modules/nixos/prometheus.nix b/modules/nixos/prometheus.nix
index 9d27980..b4a02cc 100644
--- a/modules/nixos/prometheus.nix
+++ b/modules/nixos/prometheus.nix
@@ -25,207 +25,194 @@ in
     };
   };
 
+  config = mkIf cfg.enable (mkMerge [
+    {
+      services.tailscale = {
+        enable = true;
+        permitCertUid = config.services.caddy.user;
+      };
+
+      services.caddy = {
+        enable = true;
+        virtualHosts."${config.networking.hostName}.coho-tet.ts.net".extraConfig = ''
+          reverse_proxy 127.0.0.1:${toString config.services.prometheus.port}
+        '';
+      };
+
+      services.caddy.globalConfig = ''
+        servers {
+          metrics
+        }
+      '';
+      services.restic.server.prometheus = cfg.enable;
+      services.gotosocial.settings = mkIf cfg.enable {
+        metrics-enabled = true;
+      };
+      services.ntfy-sh.settings.enable-metrics = true;
+
+      services.prometheus = mkIf cfg.enable
+        {
+          enable = true;
+          port = 9091;
+          globalConfig.external_labels = { hostname = config.networking.hostName; };
+          remoteWrite = mkIf cfg.grafana.enable [
+            {
+              name = "grafana";
+              url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push";
+              basic_auth = {
+                username = "1340065";
+                password_file = cfg.grafana.password_file;
+              };
+            }
+          ];
+          exporters = {
+            node = {
+              enable = true;
+              enabledCollectors = [
+                "conntrack"
+                "diskstats"
+                "entropy"
+                "filefd"
+                "filesystem"
+                "loadavg"
+                "meminfo"
+                "netdev"
+                "netstat"
+                "stat"
+                "time"
+                "vmstat"
+                "systemd"
+                "logind"
+                "interrupts"
+                "ksmd"
+              ];
+              port = 9100;
+            };
+          };
+          scrapeConfigs = [
+            {
+              job_name = "prometheus";
+              static_configs = [
+                { targets = [ "localhost:${toString config.services.prometheus.port}" ]; }
+              ];
+            }
+            {
+              job_name = "node";
+              static_configs = [
+                { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; }
+              ];
+            }
+          ];
+
+          alertmanager = {
+            enable = true;
+            listenAddress = "127.0.0.1";
"127.0.0.1"; - extraFlags = [ - "--cluster.advertise-address=127.0.0.1:9093" - ]; - configuration = { - route = { - receiver = "ntfy"; - }; - receivers = [ + permitCertUid = config.services.caddy.user; + }; + + services.caddy = { + enable = true; + virtualHosts."${config.networking.hostName}.coho-tet.ts.net".extraConfig = '' + reverse_proxy 127.0.0.1:${toString config.services.prometheus.port} + ''; + }; + + services.caddy.globalConfig = '' + servers { + metrics + } + ''; + services.restic.server.prometheus = cfg.enable; + services.gotosocial.settings = mkIf cfg.enable { + metrics-enabled = true; + }; + services.ntfy-sh.settings.enable-metrics = true; + + services.prometheus = mkIf cfg.enable + { + enable = true; + port = 9091; + globalConfig.external_labels = { hostname = config.networking.hostName; }; + remoteWrite = mkIf cfg.grafana.enable [ { - name = "ntfy"; - webhook_configs = [ + name = "grafana"; + url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push"; + basic_auth = { + username = "1340065"; + password_file = cfg.grafana.password_file; + }; + } + ]; + exporters = { + node = { + enable = true; + enabledCollectors = [ + "conntrack" + "diskstats" + "entropy" + "filefd" + "filesystem" + "loadavg" + "meminfo" + "netdev" + "netstat" + "stat" + "time" + "vmstat" + "systemd" + "logind" + "interrupts" + "ksmd" + ]; + port = 9100; + }; + }; + scrapeConfigs = [ + { + job_name = "prometheus"; + static_configs = [ + { targets = [ "localhost:${toString config.services.prometheus.port}" ]; } + ]; + } + { + job_name = "node"; + static_configs = [ + { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; } + ]; + } + ]; + + alertmanager = { + enable = true; + listenAddress = "127.0.0.1"; + logLevel = "debug"; + configuration = { + route = { + receiver = "ntfy"; + }; + receivers = [ { - url = "https://ntfy.xinyang.life/prometheus-alerts"; - send_resolved = true; + name = "ntfy"; + webhook_configs = [ + { + url = "https://ntfy.xinyang.life/prometheus-alerts?tpl=yes&m=${lib.escapeURL '' + Alert {{.status}} + {{range .alerts}}-----{{range $k,$v := .labels}} + {{$k}}={{$v}}{{end}} + {{end}} + ''}"; + send_resolved = true; + } + ]; + } + ]; + }; + }; + + alertmanagers = [ + { + scheme = "http"; + static_configs = [ + { + targets = [ + "${config.services.prometheus.alertmanager.listenAddress}:${toString config.services.prometheus.alertmanager.port}" + ]; } ]; } ]; - }; - }; - alertmanagers = [ - { - scheme = "http"; - path_prefix = "/alertmanager"; - static_configs = [ - { - targets = [ - "${config.services.prometheus.alertmanager.listenAddress}:${toString config.services.prometheus.alertmanager.port}" - ]; - } + rules = let mkRule = condition: { ... }@rule: (if condition then [ rule ] else [ ]); in [ + (lib.generators.toYAML { } { + groups = (mkRule true + { + name = "system_alerts"; + rules = [ + { + alert = "SystemdFailedUnits"; + expr = "node_systemd_unit_state{state=\"failed\"} > 0"; + for = "5m"; + labels = { severity = "critical"; }; + annotations = { summary = "Systemd has failed units on {{ $labels.instance }}"; description = "There are {{ $value }} failed units on {{ $labels.instance }}. 
Immediate attention required!"; }; + } + { + alert = "HighLoadAverage"; + expr = "node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode=\"idle\"})"; + for = "1m"; + labels = { severity = "warning"; }; + annotations = { summary = "High load average detected on {{ $labels.instance }}"; description = "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs."; }; + } + { + alert = "HighTransmitTraffic"; + expr = "rate(node_network_transmit_bytes_total{device!=\"lo\"}[5m]) > 100000000"; + for = "1m"; + labels = { severity = "warning"; }; + annotations = { summary = "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})"; description = "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute."; }; + } + ]; + }) ++ (mkRule config.services.restic.server.enable { + name = "restic_alerts"; + rules = [ + { + alert = "ResticCheckFailed"; + expr = "restic_check_success == 0"; + for = "5m"; + labels = { severity = "critical"; }; + annotations = { summary = "Restic check failed (instance {{ $labels.instance }})"; description = "Restic check failed\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; }; + } + { + alert = "ResticOutdatedBackup"; + expr = "time() - restic_backup_timestamp > 518400"; + for = "0m"; + labels = { severity = "critical"; }; + annotations = { summary = "Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated"; description = "Restic backup is outdated\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; }; + } + ]; + }) ++ (mkRule config.services.caddy.enable { + name = "caddy_alerts"; + rules = [ + { + alert = "UpstreamHealthy"; + expr = "caddy_reverse_proxy_upstreams_healthy == 0"; + for = "5m"; + labels = { severity = "critical"; }; + annotations = { summary = "Upstream {{ $labels.unstream }} not healthy"; }; + } + { + alert = "HighRequestLatency"; + expr = "histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5"; + for = "2m"; + labels = { severity = "warning"; }; + annotations = { summary = "High request latency on {{ $labels.instance }}"; description = "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."; }; + } + ]; + }); + }) ]; - } - ]; - - rules = [ - '' - groups: - - name: system_alerts - rules: - - alert: SystemdFailedUnits - expr: node_systemd_unit_state{state="failed"} > 0 - for: 5m - labels: - severity: critical - annotations: - summary: "Systemd has failed units on {{ $labels.instance }}" - description: "There are {{ $value }} failed units on {{ $labels.instance }}. Immediate attention required!" - - - alert: HighLoadAverage - expr: node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode="idle"}) - for: 1m - labels: - severity: warning - annotations: - summary: "High load average detected on {{ $labels.instance }}" - description: "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs." - - - alert: HighTransmitTraffic - expr: rate(node_network_transmit_bytes_total{device!="lo"}[5m]) > 100000000 - for: 1m - labels: - severity: warning - annotations: - summary: "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})" - description: "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute." 
-        ''
-        (if config.services.restic.server.enable then
-          ''
-            groups:
-            - name: restic_alerts
-              rules:
-              - alert: ResticCheckFailed
-                expr: restic_check_success == 0
-                for: 5m
-                labels:
-                  severity: critical
-                annotations:
-                  summary: Restic check failed (instance {{ $labels.instance }})
-                  description: Restic check failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
-
-              - alert: ResticOutdatedBackup
-                # 518400 = 6 days
-                expr: time() - restic_backup_timestamp > 518400
-                for: 0m
-                labels:
-                  severity: critical
-                annotations:
-                  summary: Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated
-                  description: Restic backup is outdated\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
-          '' else "")
-        (if config.services.caddy.enable then ''
-          groups:
-          - name: caddy_alerts
-            rules:
-            - alert: HighHttpErrorRate
-              expr: rate(caddy_http_request_duration_seconds_count{status_code=~"5.."}[5m]) / rate(caddy_http_request_duration_seconds_count[5m]) > 0.01
-              for: 10m
-              labels:
-                severity: critical
-              annotations:
-                summary: "High error rate on {{ $labels.instance }}"
-                description: "More than 1% of HTTP requests are errors over the last 10 minutes."
-            - alert: CaddyDown
-              expr: up{job="caddy"} == 0
-              for: 5m
-              labels:
-                severity: critical
-              annotations:
-                summary: "Caddy server down on {{ $labels.instance }}"
-                description: "Caddy server is down for more than 5 minutes."
-            - alert: HighRequestLatency
-              expr: histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5
-              for: 2m
-              labels:
-                severity: warning
-              annotations:
-                summary: "High request latency on {{ $labels.instance }}"
-                description: "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."
-        '' else "")
-      ];
+    {
+      services.prometheus.scrapeConfigs = [
+        (mkIf config.services.caddy.enable {
+          job_name = "caddy";
+          static_configs = [
+            { targets = [ "localhost:2019" ]; }
+          ];
+        })
+        (mkIf config.services.restic.server.enable {
+          job_name = "restic";
+          static_configs = [
+            { targets = [ config.services.restic.server.listenAddress ]; }
+          ];
+        })
+        (mkIf config.services.gotosocial.enable {
+          job_name = "gotosocial";
+          static_configs = [
+            { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; }
+          ];
+        })
+        (mkIf config.services.ntfy-sh.enable {
+          job_name = "ntfy-sh";
+          static_configs = [
+            { targets = [ "ntfy.xinyang.life" ]; }
+          ];
+        })
+      ];
+    }
+  ]);
+}

From 1830ab192c08866ecac9509091659dd2313beee1 Mon Sep 17 00:00:00 2001
From: xinyangli
Date: Tue, 30 Jul 2024 17:03:27 +0800
Subject: [PATCH 4/5] feat: enable tailscale on servers

---
 modules/nixos/prometheus.nix | 278 ++++++++---------------------------
 1 file changed, 64 insertions(+), 214 deletions(-)

diff --git a/modules/nixos/prometheus.nix b/modules/nixos/prometheus.nix
index b4a02cc..9ddd255 100644
--- a/modules/nixos/prometheus.nix
+++ b/modules/nixos/prometheus.nix
@@ -25,221 +25,71 @@ in
     };
   };
 
+  config = mkIf cfg.enable (mkMerge [{
+    services.caddy.globalConfig = ''
+      servers {
+        metrics
+      }
+    '';
+    services.restic.server.prometheus = cfg.enable;
+    services.gotosocial.settings = {
+      metrics-enable = true;
+    };
+    services.prometheus = mkIf cfg.enable {
+      enable = true;
+      port = 9091;
+      globalConfig.external_labels = { hostname = config.networking.hostName; };
+      remoteWrite = mkIf cfg.grafana.enable [
+        { name = "grafana";
+          url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push";
+          basic_auth = {
+            username = "1340065";
+            password_file = cfg.grafana.password_file;
+          };
+        }
+      ];
+      exporters = {
+        node = {
+          enable = true;
+          enabledCollectors = [ "systemd" ];
+          port = 9100;
+        };
+      };
+      scrapeConfigs = [
+        { job_name = "prometheus";
+          static_configs = [
+            { targets = [ "localhost:${toString config.services.prometheus.port}" ]; }
+          ];
+        }
+        { job_name = "node";
+          static_configs = [
+            { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; }
+          ];
+        }
+      ];
+    };
+  }
-  config = mkIf cfg.enable (mkMerge [
-    {
-      services.tailscale = {
-        enable = true;
-        permitCertUid = config.services.caddy.user;
-      };
-
-      services.caddy = {
-        enable = true;
-        virtualHosts."${config.networking.hostName}.coho-tet.ts.net".extraConfig = ''
-          reverse_proxy 127.0.0.1:${toString config.services.prometheus.port}
-        '';
-      };
-
-      services.caddy.globalConfig = ''
-        servers {
-          metrics
-        }
-      '';
-      services.restic.server.prometheus = cfg.enable;
-      services.gotosocial.settings = mkIf cfg.enable {
-        metrics-enabled = true;
-      };
-      services.ntfy-sh.settings.enable-metrics = true;
-
-      services.prometheus = mkIf cfg.enable
-        {
-          enable = true;
-          port = 9091;
-          globalConfig.external_labels = { hostname = config.networking.hostName; };
-          remoteWrite = mkIf cfg.grafana.enable [
-            {
-              name = "grafana";
-              url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push";
-              basic_auth = {
-                username = "1340065";
-                password_file = cfg.grafana.password_file;
-              };
-            }
-          ];
-          exporters = {
-            node = {
-              enable = true;
-              enabledCollectors = [
-                "conntrack"
-                "diskstats"
-                "entropy"
-                "filefd"
-                "filesystem"
-                "loadavg"
-                "meminfo"
-                "netdev"
-                "netstat"
-                "stat"
-                "time"
-                "vmstat"
-                "systemd"
-                "logind"
-                "interrupts"
-                "ksmd"
-              ];
-              port = 9100;
-            };
-          };
-          scrapeConfigs = [
-            {
-              job_name = "prometheus";
-              static_configs = [
-                { targets = [ "localhost:${toString config.services.prometheus.port}" ]; }
-              ];
-            }
-            {
-              job_name = "node";
-              static_configs = [
-                { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; }
-              ];
-            }
-          ];
-
-          alertmanager = {
-            enable = true;
-            listenAddress = "127.0.0.1";
-            logLevel = "debug";
-            configuration = {
-              route = {
-                receiver = "ntfy";
-              };
-              receivers = [
-                {
-                  name = "ntfy";
-                  webhook_configs = [
-                    {
-                      url = "https://ntfy.xinyang.life/prometheus-alerts?tpl=yes&m=${lib.escapeURL ''
-                        Alert {{.status}}
-                        {{range .alerts}}-----{{range $k,$v := .labels}}
-                        {{$k}}={{$v}}{{end}}
-                        {{end}}
-                      ''}";
-                      send_resolved = true;
-                    }
-                  ];
-                }
-              ];
-            };
-          };
-
-          alertmanagers = [
-            {
-              scheme = "http";
-              static_configs = [
-                {
-                  targets = [
-                    "${config.services.prometheus.alertmanager.listenAddress}:${toString config.services.prometheus.alertmanager.port}"
-                  ];
-                }
-              ];
-            }
-          ];
-
-          rules = let mkRule = condition: { ... }@rule: (if condition then [ rule ] else [ ]); in [
-            (lib.generators.toYAML { } {
-              groups = (mkRule true
-                {
-                  name = "system_alerts";
-                  rules = [
-                    {
-                      alert = "SystemdFailedUnits";
-                      expr = "node_systemd_unit_state{state=\"failed\"} > 0";
-                      for = "5m";
-                      labels = { severity = "critical"; };
-                      annotations = { summary = "Systemd has failed units on {{ $labels.instance }}"; description = "There are {{ $value }} failed units on {{ $labels.instance }}. Immediate attention required!"; };
Immediate attention required!"; }; - } - { - alert = "HighLoadAverage"; - expr = "node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode=\"idle\"})"; - for = "1m"; - labels = { severity = "warning"; }; - annotations = { summary = "High load average detected on {{ $labels.instance }}"; description = "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs."; }; - } - { - alert = "HighTransmitTraffic"; - expr = "rate(node_network_transmit_bytes_total{device!=\"lo\"}[5m]) > 100000000"; - for = "1m"; - labels = { severity = "warning"; }; - annotations = { summary = "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})"; description = "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute."; }; - } - ]; - }) ++ (mkRule config.services.restic.server.enable { - name = "restic_alerts"; - rules = [ - { - alert = "ResticCheckFailed"; - expr = "restic_check_success == 0"; - for = "5m"; - labels = { severity = "critical"; }; - annotations = { summary = "Restic check failed (instance {{ $labels.instance }})"; description = "Restic check failed\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; }; - } - { - alert = "ResticOutdatedBackup"; - expr = "time() - restic_backup_timestamp > 518400"; - for = "0m"; - labels = { severity = "critical"; }; - annotations = { summary = "Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated"; description = "Restic backup is outdated\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; }; - } - ]; - }) ++ (mkRule config.services.caddy.enable { - name = "caddy_alerts"; - rules = [ - { - alert = "UpstreamHealthy"; - expr = "caddy_reverse_proxy_upstreams_healthy == 0"; - for = "5m"; - labels = { severity = "critical"; }; - annotations = { summary = "Upstream {{ $labels.unstream }} not healthy"; }; - } - { - alert = "HighRequestLatency"; - expr = "histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5"; - for = "2m"; - labels = { severity = "warning"; }; - annotations = { summary = "High request latency on {{ $labels.instance }}"; description = "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."; }; - } - ]; - }); - }) - ]; - }; - } - { - services.prometheus.scrapeConfigs = [ - (mkIf config.services.caddy.enable { - job_name = "caddy"; - static_configs = [ - { targets = [ "localhost:2019" ]; } - ]; - }) - (mkIf config.services.restic.server.enable { - job_name = "restic"; - static_configs = [ - { targets = [ config.services.restic.server.listenAddress ]; } - ]; - }) - (mkIf config.services.gotosocial.enable { - job_name = "gotosocial"; - static_configs = [ - { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; } - ]; - }) - (mkIf config.services.ntfy-sh.enable { - job_name = "ntfy-sh"; - static_configs = [ - { targets = [ "auth.xinyang.life" ]; } - ]; - }) ]; - } + exporters = { + node = { + enable = true; + enabledCollectors = [ "systemd" ]; + port = 9100; + }; + }; + scrapeConfigs = [ + { job_name = "prometheus"; + static_configs = [ + { targets = [ "localhost:${toString config.services.prometheus.port}" ]; } + ]; + } + { job_name = "node"; + static_configs = [ + { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; } + ]; + } + ]; + }; + } + { + services.prometheus.scrapeConfigs = [ + ( mkIf config.services.caddy.enable { + job_name = "caddy"; + static_configs = [ + { targets 
= [ "localhost:2019" ]; } + ]; + }) + ( mkIf config.services.restic.server.enable { + job_name = "restic"; + static_configs = [ + { targets = [ config.services.restic.server.listenAddress ]; } + ]; + }) + ( mkIf config.services.gotosocial.enable { + job_name = "gotosocial"; + static_configs = [ + { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; } + ]; + }) + ]; + } ]); } From 745ce62f88ba3b8add9cc3ce18a47c93ed22f4d0 Mon Sep 17 00:00:00 2001 From: xinyangli Date: Tue, 30 Jul 2024 15:56:02 +0800 Subject: [PATCH 5/5] feat: better prometheus integration --- modules/nixos/prometheus.nix | 154 ++++++++++++++++++++++++++++------- 1 file changed, 125 insertions(+), 29 deletions(-) diff --git a/modules/nixos/prometheus.nix b/modules/nixos/prometheus.nix index 9ddd255..3e59480 100644 --- a/modules/nixos/prometheus.nix +++ b/modules/nixos/prometheus.nix @@ -32,15 +32,16 @@ in } ''; services.restic.server.prometheus = cfg.enable; - services.gotosocial.settings = { - metrics-enable = true; + services.gotosocial.settings = mkIf cfg.enable { + metrics-enabled = true; }; services.prometheus = mkIf cfg.enable { enable = true; port = 9091; globalConfig.external_labels = { hostname = config.networking.hostName; }; remoteWrite = mkIf cfg.grafana.enable [ - { name = "grafana"; + { + name = "grafana"; url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push"; basic_auth = { username = "1340065"; @@ -51,45 +52,140 @@ in exporters = { node = { enable = true; - enabledCollectors = [ "systemd" ]; + enabledCollectors = [ + "conntrack" + "diskstats" + "entropy" + "filefd" + "filesystem" + "loadavg" + "meminfo" + "netdev" + "netstat" + "stat" + "time" + "vmstat" + "systemd" + "logind" + "interrupts" + "ksmd" + ]; port = 9100; }; }; scrapeConfigs = [ - { job_name = "prometheus"; + { + job_name = "prometheus"; static_configs = [ { targets = [ "localhost:${toString config.services.prometheus.port}" ]; } ]; } - { job_name = "node"; + { + job_name = "node"; static_configs = [ { targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; } ]; } ]; + + alertmanager = { + enable = true; + listenAddress = "127.0.0.1"; + extraFlags = [ + "--cluster.advertise-address=127.0.0.1:9093" + ]; + configuration = { + route = { + receiver = "ntfy"; + }; + receivers = [ + { + name = "ntfy"; + webhook_configs = [ + { + url = "https://ntfy.xinyang.life/prometheus-alerts"; + send_resolved = true; + } + ]; + } + ]; + }; + }; + + alertmanagers = [ + { + scheme = "http"; + path_prefix = "/alertmanager"; + static_configs = [ + { + targets = [ + "${config.services.prometheus.alertmanager.listenAddress}:${toString config.services.prometheus.alertmanager.port}" + ]; + } + ]; + } + ]; + + rules = [ + '' + groups: + - name: system_alerts + rules: + - alert: SystemdFailedUnits + expr: node_systemd_unit_state{state="failed"} > 0 + for: 5m + labels: + severity: critical + annotations: + summary: "Systemd has failed units on {{ $labels.instance }}" + description: "There are {{ $value }} failed units on {{ $labels.instance }}. Immediate attention required!" + + - alert: HighLoadAverage + expr: node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode="idle"}) + for: 1m + labels: + severity: warning + annotations: + summary: "High load average detected on {{ $labels.instance }}" + description: "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs." 
+ + - alert: HighTransmitTraffic + expr: rate(node_network_transmit_bytes_total{device!="lo"}[5m]) > 100000000 + for: 1m + labels: + severity: warning + annotations: + summary: "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})" + description: "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute." + '' + (if config.services.restic.server.enable then + '' + groups: + - name: restic_alerts + '' else "") + ]; }; } - { - services.prometheus.scrapeConfigs = [ - ( mkIf config.services.caddy.enable { - job_name = "caddy"; - static_configs = [ - { targets = [ "localhost:2019" ]; } - ]; - }) - ( mkIf config.services.restic.server.enable { - job_name = "restic"; - static_configs = [ - { targets = [ config.services.restic.server.listenAddress ]; } - ]; - }) - ( mkIf config.services.gotosocial.enable { - job_name = "gotosocial"; - static_configs = [ - { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; } - ]; - }) - ]; - } - ]); + { + services.prometheus.scrapeConfigs = [ + (mkIf config.services.caddy.enable { + job_name = "caddy"; + static_configs = [ + { targets = [ "localhost:2019" ]; } + ]; + }) + (mkIf config.services.restic.server.enable { + job_name = "restic"; + static_configs = [ + { targets = [ config.services.restic.server.listenAddress ]; } + ]; + }) + (mkIf config.services.gotosocial.enable { + job_name = "gotosocial"; + static_configs = [ + { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; } + ]; + }) + ]; + }]); }
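---

Appendix: the commonSettings module that patches 1 and 2 switch the machines over to is not part of this series. Below is a minimal sketch of what such a module could look like, reconstructed from the two options the patches set (commonSettings.auth.enable and commonSettings.nix.enable) and from the settings they delete from the per-machine configs. The file location, option descriptions, and overall module skeleton are assumptions; only the option names and the kanidm/openssh/sudo values come from the patches themselves:

  # modules/common/default.nix (hypothetical path)
  { config, lib, ... }:
  let
    cfg = config.commonSettings;
  in
  {
    options.commonSettings = {
      auth.enable = lib.mkEnableOption "unified SSH auth via kanidm";
      nix.enable = lib.mkEnableOption "common nix daemon settings";
    };

    config = lib.mkIf cfg.auth.enable {
      # Everything below mirrors what patch 1 removes from
      # machines/dolomite/default.nix and machines/massicot/default.nix.
      custom.kanidm-client = {
        enable = true;
        uri = "https://auth.xinyang.life/";
        asSSHAuth = {
          enable = true;
          allowedGroups = [ "linux_users" ];
        };
        sudoers = [ "xin@auth.xinyang.life" ];
      };

      services.openssh.settings = {
        PasswordAuthentication = false;
        KbdInteractiveAuthentication = false;
        PermitRootLogin = lib.mkForce "no";
        GSSAPIAuthentication = "no";
        KerberosAuthentication = "no";
      };

      services.fail2ban.enable = true;
      programs.mosh.enable = true;

      security.sudo = {
        execWheelOnly = true;
        wheelNeedsPassword = false;
      };
    };
  }

With a module like this in the shared module list, each machine only needs the two-line commonSettings block the patches add.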
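Patch 3 also changes how the alert rule files are produced: instead of hand-written YAML strings, it pushes Nix attrsets through lib.generators.toYAML, guarded by a small mkRule helper. A group whose condition is false evaluates to [ ] and simply disappears when the group lists are concatenated with ++. A standalone sketch of that mechanism, under stated assumptions (the file name and the reduced one-rule group are hypothetical; mkRule's shape and lib.generators.toYAML are taken from the patch):

  # mkrule-demo.nix (hypothetical) -- evaluate with:
  #   nix-instantiate --eval --strict mkrule-demo.nix
  let
    lib = (import <nixpkgs> { }).lib;
    # Same shape as the helper in patch 3: keep the group as a
    # one-element list when the condition holds, else an empty list.
    mkRule = condition: rule: if condition then [ rule ] else [ ];
  in
  lib.generators.toYAML { } {
    groups =
      (mkRule true {
        name = "system_alerts";
        rules = [{
          alert = "SystemdFailedUnits";
          expr = ''node_systemd_unit_state{state="failed"} > 0'';
        }];
      })
      # A false guard (e.g. config.services.caddy.enable on a host
      # without caddy) contributes nothing to the final document.
      ++ (mkRule false {
        name = "caddy_alerts";
        rules = [ ];
      });
  }

The result contains only the system_alerts group, and services.prometheus.rules receives it as a single rendered string, which is what the module's rules list expects.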