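{{- /*
Helm test that verifies the chart's ServiceMonitor is actually discovered by
Prometheus. Run with "helm test <release-name>" after installing the chart
with serviceMonitor.enabled=true.
*/}}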
{{- if .Values.serviceMonitor.enabled -}}
apiVersion: v1
kind: Pod
metadata:
  name: {{ include "trino-gateway.fullname" . }}-test-servicemonitor
  labels:
    {{- include "trino-gateway.labels" . | nindent 4 }}
    app.kubernetes.io/component: test
    test: servicemonitor
  annotations:
    "helm.sh/hook": test
    "helm.sh/hook-delete-policy": hook-succeeded
spec:
  containers:
    - name: service-monitor
      image: python:3-slim
      command: ["python", "/tests/test.py"]
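      # argv[1] = ServiceMonitor name (the chart fullname), argv[2] = expected Service name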
      args: ["{{ include "trino-gateway.fullname" . }}", "{{ .Values.serviceName }}"]
      volumeMounts:
        - name: tests
          mountPath: /tests
  volumes:
    - name: tests
      configMap:
        name: {{ include "trino-gateway.fullname" . }}-test-servicemonitor
  restartPolicy: Never
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "trino-gateway.fullname" . }}-test-servicemonitor
  labels:
    {{- include "trino-gateway.labels" . | nindent 4 }}
    app.kubernetes.io/component: test
    test: servicemonitor
  annotations:
    "helm.sh/hook": test
    "helm.sh/hook-delete-policy": hook-succeeded
data:
  test.py: |
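    # Helm test script: polls the Prometheus targets API until the
    # ServiceMonitor's scrape pool reports an active target whose backing
    # Service matches the expected name. Exits 0 on a match, 1 after
    # exhausting retries.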
    from urllib.request import urlopen
    from urllib.error import URLError, HTTPError
    import json
    import logging
    import sys
    import time

    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    logger = logging.getLogger(__name__)

    servicemonitor_name = sys.argv[1]
    expected_service = sys.argv[2]
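    # NOTE: the Prometheus host below assumes a kube-prometheus-stack
    # installation whose Prometheus Service is named
    # "prometheus-operator-kube-p-prometheus" and is reachable from this
    # namespace; adjust the host if your Prometheus Service differs.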
    namespace = "{{ .Release.Namespace }}"
    url = f"http://prometheus-operator-kube-p-prometheus:9090/api/v1/targets?scrapePool=serviceMonitor/{namespace}/{servicemonitor_name}/0&state=active"
    all_targets_url = "http://prometheus-operator-kube-p-prometheus:9090/api/v1/targets"
    max_retries = 90  # 3 minutes max (90 * 2 seconds)
    retry_count = 0

    logger.info(f"Looking for ServiceMonitor '{servicemonitor_name}' in namespace '{namespace}'")
    logger.info(f"Expected service name: '{expected_service}'")

    while retry_count < max_retries:
        try:
            with urlopen(url, timeout=10) as response:
                data = json.load(response)
        except (URLError, HTTPError) as e:
            retry_count += 1
            logger.warning(f"Error fetching targets (attempt {retry_count}/{max_retries}), Prometheus service might not be ready: {e}")
            if retry_count >= max_retries:
                logger.error(f"Failed to connect to Prometheus after {max_retries} attempts")
                sys.exit(1)
            time.sleep(2)  # Retry after 2 seconds
            continue

        try:
            active_targets = data.get("data", {}).get("activeTargets", [])
            if not active_targets:
                retry_count += 1
                # Log diagnostic info every 10 attempts
                if retry_count % 10 == 0:
                    try:
                        with urlopen(all_targets_url, timeout=10) as all_response:
                            all_data = json.load(all_response)
                        all_active = all_data.get("data", {}).get("activeTargets", [])
                        logger.info(f"Prometheus has {len(all_active)} total active targets")
                        # Find ServiceMonitor scrape pools
                        servicemonitor_pools = [t.get("scrapePool", "") for t in all_active if "serviceMonitor" in t.get("scrapePool", "")]
                        if servicemonitor_pools:
                            logger.info(f"Found ServiceMonitor scrape pools: {servicemonitor_pools[:5]}")  # Show first 5
                    except Exception as e:
                        logger.debug(f"Could not fetch all targets for diagnostics: {e}")
                logger.warning(f"No active targets found (attempt {retry_count}/{max_retries}), waiting for ServiceMonitor to be discovered...")
                if retry_count >= max_retries:
                    logger.error(f"No active targets found after {max_retries} attempts")
                    logger.error(f"ServiceMonitor '{servicemonitor_name}' was not discovered by Prometheus")
                    sys.exit(1)
                time.sleep(2)  # Retry after 2 seconds
                continue
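            # discoveredLabels carries the raw Kubernetes service-discovery
            # metadata for the target, including the Service backing it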
            service_name = active_targets[0]["discoveredLabels"]["__meta_kubernetes_service_name"]
        except (KeyError, IndexError) as e:
            retry_count += 1
            logger.warning(f"Invalid Prometheus response (attempt {retry_count}/{max_retries}): {e}")
            if retry_count >= max_retries:
                logger.error(f"Invalid Prometheus response after {max_retries} attempts")
                sys.exit(1)
            time.sleep(2)  # Retry after 2 seconds
            continue

        if service_name == expected_service:
            logger.info(f"Found expected service '{service_name}' in Prometheus targets!")
            sys.exit(0)
        else:
            retry_count += 1
            logger.warning(f"Service name mismatch: expected '{expected_service}', got '{service_name}' (attempt {retry_count}/{max_retries})")
            if retry_count >= max_retries:
                logger.error(f"Service name mismatch after {max_retries} attempts")
                sys.exit(1)
            time.sleep(2)

    logger.error(f"Test failed after {max_retries} attempts")
    sys.exit(1)
{{- end }}