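# Alert manifest that bundles the same three nginx-ingress alerts in
# two formats: plain Prometheus alerting rules and Sysdig alert
# definitions of type PROMETHEUS.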
apiVersion: v1
kind: Alert
app: "Nginx-Ingress"
version: 1.0.0
appVersion:
- '2.7.0'
configurations:
- kind: Prometheus
  data: |
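    # Percentage of requests answered with a 4xx status over the last
    # minute, relative to all requests; fires after 5m above 5%. Note
    # that sum() drops all labels, so {{ $labels.instance }} in the
    # annotations renders empty unless the sums are keyed by (instance).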
    - alert: NginxHighHttp4xxErrorRate
      expr: sum(rate(nginx_ingress_controller_requests{status=~"4.."}[1m])) / sum(rate(nginx_ingress_controller_requests[1m])) * 100 > 5
      for: 5m
      labels:
        severity: critical
      annotations:
        summary: Nginx high HTTP 4xx error rate (instance {{ $labels.instance }})
        description: Too many HTTP requests with status 4xx (> 5%)
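    # Same ratio for 5xx responses. PromQL regexes are fully anchored,
    # so "5.." matches exactly the three-character 5xx status codes.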
    - alert: NginxHighHttp5xxErrorRate
      expr: sum(rate(nginx_ingress_controller_requests{status=~"5.."}[1m])) / sum(rate(nginx_ingress_controller_requests[1m])) * 100 > 5
      for: 5m
      labels:
        severity: critical
      annotations:
        summary: Nginx high HTTP 5xx error rate (instance {{ $labels.instance }})
        description: Too many HTTP requests with status 5xx (> 5%)
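    # p99 request latency per host/node, estimated from the request
    # duration histogram over a 30m window; warns above 10 seconds.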
    - alert: NginxLatencyHigh
      expr: histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket[30m])) by (host, node)) > 10
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Nginx latency high (instance {{ $labels.instance }})
        description: Nginx p99 latency is higher than 10 seconds
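# The Sysdig entries below mirror the three Prometheus rules as
# PROMETHEUS-type Sysdig alerts. severity 4 corresponds to Sysdig's
# "LOW" label (independent of the Prometheus severity labels), and
# timespan appears to be microseconds: 600000000 us = 10 minutes.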
- kind: Sysdig
  data: |-
    {
      "alert": {
        "condition": "sum(rate(nginx_ingress_controller_requests{status=~\"4..\"}[1m])) / sum(rate(nginx_ingress_controller_requests[1m])) * 100 > 5",
        "customNotification": {
          "titleTemplate": "{{__alert_name__}} is {{__alert_status__}}",
          "useNewTemplate": false
        },
        "enabled": true,
        "name": "NginxHighHttp4xxErrorRate",
        "rateOfChange": false,
        "reNotify": false,
        "reNotifyMinutes": 5,
        "severity": 4,
        "severityLabel": "LOW",
        "severityLevel": null,
        "timespan": 600000000,
        "type": "PROMETHEUS"
      }
    }
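# 5xx variant of the same PROMETHEUS-type alert.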
- kind: Sysdig
  data: |-
    {
      "alert": {
        "condition": "sum(rate(nginx_ingress_controller_requests{status=~\"5..\"}[1m])) / sum(rate(nginx_ingress_controller_requests[1m])) * 100 > 5",
        "customNotification": {
          "titleTemplate": "{{__alert_name__}} is {{__alert_status__}}",
          "useNewTemplate": false
        },
        "enabled": true,
        "name": "NginxHighHttp5xxErrorRate",
        "rateOfChange": false,
        "reNotify": false,
        "reNotifyMinutes": 5,
        "severity": 4,
        "severityLabel": "LOW",
        "severityLevel": null,
        "timespan": 600000000,
        "type": "PROMETHEUS"
      }
    }
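# Latency variant; identical settings, only the PromQL condition differs.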
- kind: Sysdig
  data: |-
    {
      "alert": {
        "condition": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket[30m])) by (host, node)) > 10",
        "customNotification": {
          "titleTemplate": "{{__alert_name__}} is {{__alert_status__}}",
          "useNewTemplate": false
        },
        "enabled": true,
        "name": "NginxLatencyHigh",
        "rateOfChange": false,
        "reNotify": false,
        "reNotifyMinutes": 5,
        "severity": 4,
        "severityLabel": "LOW",
        "severityLevel": null,
        "timespan": 600000000,
        "type": "PROMETHEUS"
      }
    }
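# ALERTS.md presumably carries the full write-up; the inline
# description below is a short markdown summary of the same alerts.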
descriptionFile: ALERTS.md
description: |
  # Alerts
  ## NginxHighHttp4xxErrorRate / NginxHighHttp5xxErrorRate
  These alerts fire when more than 5% of HTTP requests return a 4xx or 5xx status for more than 5 minutes.
  ## NginxLatencyHigh
  This alert fires when the p99 request latency stays above 10 seconds for more than 5 minutes.