fix
parent c88f86eec2
commit 013d87afdf

1 changed file with 187 additions and 193 deletions
@@ -25,7 +25,8 @@ in
     };
   };

-  config = mkIf cfg.enable (mkMerge [{
+  config = mkIf cfg.enable (mkMerge [
+    {
     services.tailscale = {
       enable = true;
       permitCertUid = config.services.caddy.user;
@@ -47,7 +48,10 @@ in
     services.gotosocial.settings = mkIf cfg.enable {
       metrics-enabled = true;
     };
-    services.prometheus = mkIf cfg.enable {
+    services.ntfy-sh.settings.enable-metrics = true;
+
+    services.prometheus = mkIf cfg.enable
+      {
       enable = true;
       port = 9091;
       globalConfig.external_labels = { hostname = config.networking.hostName; };
@@ -103,9 +107,7 @@ in
       alertmanager = {
         enable = true;
         listenAddress = "127.0.0.1";
-        extraFlags = [
-          "--cluster.advertise-address=127.0.0.1:9093"
-        ];
+        logLevel = "debug";
         configuration = {
           route = {
             receiver = "ntfy";
@@ -115,7 +117,12 @@ in
             name = "ntfy";
             webhook_configs = [
               {
-                url = "https://ntfy.xinyang.life/prometheus-alerts";
+                url = "https://ntfy.xinyang.life/prometheus-alerts?tpl=yes&m=${lib.escapeURL ''
+                  Alert {{.status}}
+                  {{range .alerts}}-----{{range $k,$v := .labels}}
+                  {{$k}}={{$v}}{{end}}
+                  {{end}}
+                ''}";
                 send_resolved = true;
               }
             ];
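
Note: the new webhook URL carries an ntfy message template, URL-encoded into the m= query parameter with lib.escapeURL. A minimal standalone sketch of that encoding, assuming nixpkgs' lib is available via <nixpkgs> (save as a .nix file and run nix-instantiate --eval on it):

let
  lib = (import <nixpkgs> { }).lib;
  # The same template text the hunk above interpolates into the URL.
  template = ''
    Alert {{.status}}
    {{range .alerts}}-----{{range $k,$v := .labels}}
    {{$k}}={{$v}}{{end}}
    {{end}}
  '';
in
# lib.escapeURL percent-encodes the multi-line template so it is safe to
# embed as a query-string value.
"https://ntfy.xinyang.life/prometheus-alerts?tpl=yes&m=${lib.escapeURL template}"
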
@@ -127,7 +134,6 @@ in
         alertmanagers = [
           {
             scheme = "http";
-            path_prefix = "/alertmanager";
             static_configs = [
               {
                 targets = [
@@ -138,91 +144,72 @@ in
             }
           ];

+        rules = let mkRule = condition: { ... }@rule: (if condition then [ rule ] else [ ]); in [
+          (lib.generators.toYAML { } {
+            groups = (mkRule true
+              {
+                name = "system_alerts";
         rules = [
-          ''
-            groups:
-            - name: system_alerts
-              rules:
-              - alert: SystemdFailedUnits
-                expr: node_systemd_unit_state{state="failed"} > 0
-                for: 5m
-                labels:
-                  severity: critical
-                annotations:
-                  summary: "Systemd has failed units on {{ $labels.instance }}"
-                  description: "There are {{ $value }} failed units on {{ $labels.instance }}. Immediate attention required!"
-
-              - alert: HighLoadAverage
-                expr: node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode="idle"})
-                for: 1m
-                labels:
-                  severity: warning
-                annotations:
-                  summary: "High load average detected on {{ $labels.instance }}"
-                  description: "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs."
-
-              - alert: HighTransmitTraffic
-                expr: rate(node_network_transmit_bytes_total{device!="lo"}[5m]) > 100000000
-                for: 1m
-                labels:
-                  severity: warning
-                annotations:
-                  summary: "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})"
-                  description: "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute."
-          ''
-          (if config.services.restic.server.enable then
-            ''
-              groups:
-              - name: restic_alerts
-                rules:
-                - alert: ResticCheckFailed
-                  expr: restic_check_success == 0
-                  for: 5m
-                  labels:
-                    severity: critical
-                  annotations:
-                    summary: Restic check failed (instance {{ $labels.instance }})
-                    description: Restic check failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
-
-                - alert: ResticOutdatedBackup
-                  # 1209600 = 15 days
-                  expr: time() - restic_backup_timestamp > 518400
-                  for: 0m
-                  labels:
-                    severity: critical
-                  annotations:
-                    summary: Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated
-                    description: Restic backup is outdated\n VALUE = {{ $value }}\n LABELS = {{ $labels }}
-            '' else "")
-          (if config.services.caddy.enable then ''
-            groups:
-            - name: caddy_alerts
-              rules:
-              - alert: HighHttpErrorRate
-                expr: rate(caddy_http_request_duration_seconds_count{status_code=~"5.."}[5m]) / rate(caddy_http_request_duration_seconds_count[5m]) > 0.01
-                for: 10m
-                labels:
-                  severity: critical
-                annotations:
-                  summary: "High error rate on {{ $labels.instance }}"
-                  description: "More than 1% of HTTP requests are errors over the last 10 minutes."
-              - alert: CaddyDown
-                expr: up{job="caddy"} == 0
-                for: 5m
-                labels:
-                  severity: critical
-                annotations:
-                  summary: "Caddy server down on {{ $labels.instance }}"
-                  description: "Caddy server is down for more than 5 minutes."
-              - alert: HighRequestLatency
-                expr: histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5
-                for: 2m
-                labels:
-                  severity: warning
-                annotations:
-                  summary: "High request latency on {{ $labels.instance }}"
-                  description: "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."
-          '' else "")
+                  {
+                    alert = "SystemdFailedUnits";
+                    expr = "node_systemd_unit_state{state=\"failed\"} > 0";
+                    for = "5m";
+                    labels = { severity = "critical"; };
+                    annotations = { summary = "Systemd has failed units on {{ $labels.instance }}"; description = "There are {{ $value }} failed units on {{ $labels.instance }}. Immediate attention required!"; };
+                  }
+                  {
+                    alert = "HighLoadAverage";
+                    expr = "node_load1 > 0.8 * count without (cpu) (node_cpu_seconds_total{mode=\"idle\"})";
+                    for = "1m";
+                    labels = { severity = "warning"; };
+                    annotations = { summary = "High load average detected on {{ $labels.instance }}"; description = "The 1-minute load average ({{ $value }}) exceeds 80% the number of CPUs."; };
+                  }
+                  {
+                    alert = "HighTransmitTraffic";
+                    expr = "rate(node_network_transmit_bytes_total{device!=\"lo\"}[5m]) > 100000000";
+                    for = "1m";
+                    labels = { severity = "warning"; };
+                    annotations = { summary = "High network transmit traffic on {{ $labels.instance }} ({{ $labels.device }})"; description = "The network interface {{ $labels.device }} on {{ $labels.instance }} is transmitting data at a rate exceeding 100 MB/s for the last 1 minute."; };
+                  }
+                ];
+              }) ++ (mkRule config.services.restic.server.enable {
+                name = "restic_alerts";
+                rules = [
+                  {
+                    alert = "ResticCheckFailed";
+                    expr = "restic_check_success == 0";
+                    for = "5m";
+                    labels = { severity = "critical"; };
+                    annotations = { summary = "Restic check failed (instance {{ $labels.instance }})"; description = "Restic check failed\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; };
+                  }
+                  {
+                    alert = "ResticOutdatedBackup";
+                    expr = "time() - restic_backup_timestamp > 518400";
+                    for = "0m";
+                    labels = { severity = "critical"; };
+                    annotations = { summary = "Restic {{ $labels.client_hostname }} / {{ $labels.client_username }} backup is outdated"; description = "Restic backup is outdated\\n VALUE = {{ $value }}\\n LABELS = {{ $labels }}"; };
+                  }
+                ];
+              }) ++ (mkRule config.services.caddy.enable {
+                name = "caddy_alerts";
+                rules = [
+                  {
+                    alert = "UpstreamHealthy";
+                    expr = "caddy_reverse_proxy_upstreams_healthy == 0";
+                    for = "5m";
+                    labels = { severity = "critical"; };
+                    annotations = { summary = "Upstream {{ $labels.unstream }} not healthy"; };
+                  }
+                  {
+                    alert = "HighRequestLatency";
+                    expr = "histogram_quantile(0.95, rate(caddy_http_request_duration_seconds_bucket[10m])) > 0.5";
+                    for = "2m";
+                    labels = { severity = "warning"; };
+                    annotations = { summary = "High request latency on {{ $labels.instance }}"; description = "95th percentile of request latency is above 0.5 seconds for the last 2 minutes."; };
+                  }
+                ];
+              });
+          })
         ];
       };
     }
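
Note: this hunk replaces the hand-written YAML rule strings with Nix attribute sets rendered through lib.generators.toYAML, gated by the small mkRule helper defined inline above. A minimal standalone sketch of that pattern, assuming nixpkgs' lib is available via <nixpkgs>; the group contents here are placeholders, and a group whose condition is false simply drops out of the concatenation:

let
  lib = (import <nixpkgs> { }).lib;
  # mkRule wraps a rule group in a singleton list when its condition holds,
  # so optional groups can be chained with ++ and disabled ones vanish.
  mkRule = condition: { ... }@rule: (if condition then [ rule ] else [ ]);
  groups = (mkRule true { name = "system_alerts"; rules = [ ]; })
    ++ (mkRule false { name = "restic_alerts"; rules = [ ]; });
in
# Only system_alerts survives; toYAML serializes the result for Prometheus.
lib.generators.toYAML { } { inherit groups; }
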
@@ -246,6 +233,13 @@ in
           { targets = [ "localhost:${toString config.services.gotosocial.settings.port}" ]; }
         ];
       })
+      (mkIf config.services.ntfy-sh.enable {
+        job_name = "ntfy-sh";
+        static_configs = [
+          { targets = [ "auth.xinyang.life" ]; }
       ];
-  }]);
+      })
+    ];
+    }
+  ]);
 }