
Commit fd29bfe

* Remove dashboards volume from grafana default
* images update: 4.5.1
1 parent 672dbaa

28 files changed: +57 −81 lines

base/monitoring/cadvisor/cadvisor.DaemonSet.yaml (1 addition, 1 deletion)

@@ -26,7 +26,7 @@ spec:
       serviceAccountName: cadvisor
       containers:
         - name: cadvisor
-          image: index.docker.io/sourcegraph/cadvisor:4.5.0@sha256:5117f2bc817c16fb129acb6f9b070af8f1be09d3d9a8f88e3297f7adfff9af0d
+          image: index.docker.io/sourcegraph/cadvisor:4.5.1@sha256:9da386528adbdf755f38dab6a40f6c4dbbb489ea4c418b2412d1418b5b25d5ea
           args:
             # Kubernetes-specific flags below (other flags are baked into the Docker image)
             #
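Every image bump in this commit follows this same pattern: the tag moves to 4.5.1 and the digest is re-pinned. As a sketch only — assuming these manifests are consumed through kustomize, which the kustomization.yaml files in this tree suggest but the commit does not show — kustomize's images transformer could express such a bump in one place instead of hand-editing each manifest:

# kustomization.yaml sketch (illustrative, not part of this commit).
# Setting digest alone replaces everything after the image name with
# "@sha256:...", pinning the image without editing the Deployment/DaemonSet.
images:
  - name: index.docker.io/sourcegraph/cadvisor
    digest: sha256:9da386528adbdf755f38dab6a40f6c4dbbb489ea4c418b2412d1418b5b25d5ea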

base/monitoring/grafana/grafana.StatefulSet.yaml (1 addition, 3 deletions)

@@ -26,7 +26,7 @@ spec:
     spec:
       containers:
         - name: grafana
-          image: index.docker.io/sourcegraph/grafana:4.5.0@sha256:f70a7f79c5c90cab0d5cfb8f3dbca4dc60ed390b045aff1a86079c87bfe9a8af
+          image: index.docker.io/sourcegraph/grafana:4.5.1@sha256:a04ca50bd1678dd0e15a72d77887c2bdd84b70358acb66d430fa0c2fb74e0399
           terminationMessagePolicy: FallbackToLogsOnError
           ports:
             - containerPort: 3370
@@ -36,8 +36,6 @@ spec:
               name: grafana-data
             - mountPath: /sg_config_grafana/provisioning/datasources
               name: config
-            - mountPath: /sg_grafana_additional_dashboards
-              name: dashboards
           # Grafana is relied upon to send alerts to site admins when something is wrong with
           # Sourcegraph, thus its memory requests and limits are the same to guarantee it has enough
           # memory to perform its job reliably and prevent conflicts with other pods on the same
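The second hunk drops the default mount for externally provisioned dashboards. For a deployment that relied on /sg_grafana_additional_dashboards, a minimal overlay patch along these lines could restore it — the volumes entry and the configMap name "grafana-dashboards" are assumptions, since the commit only shows the volumeMount side:

# Strategic-merge patch sketch (hypothetical): re-add the mount removed above.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: grafana
spec:
  template:
    spec:
      containers:
        - name: grafana # merged by name with the existing container
          volumeMounts:
            - mountPath: /sg_grafana_additional_dashboards
              name: dashboards
      volumes:
        - name: dashboards
          configMap:
            name: grafana-dashboards # hypothetical; point at your dashboard JSON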

base/monitoring/jaeger/jaeger.Deployment.yaml (1 addition, 1 deletion)

@@ -30,7 +30,7 @@ spec:
     spec:
       containers:
         - name: jaeger
-          image: index.docker.io/sourcegraph/jaeger-all-in-one:4.5.0@sha256:461476b01968324a0d8cb43a0176713e006f99cdb1f2efc3ab2210fd0bb812c2
+          image: index.docker.io/sourcegraph/jaeger-all-in-one:4.5.1@sha256:2f27f6069540d7db46a6a2e72d489802c1dff2081e4f4a94762f63b9147879f8
           args: ["--memory.max-traces=20000"]
           ports:
             - containerPort: 5775

base/monitoring/node-exporter/node-exporter.DaemonSet.yaml (1 addition, 1 deletion)

@@ -24,7 +24,7 @@ spec:
     spec:
       containers:
         - name: node-exporter
-          image: index.docker.io/sourcegraph/node-exporter:4.5.0@sha256:fa8e5700b7762fffe0674e944762f44bb787a7e44d97569fe55348260453bf80
+          image: index.docker.io/sourcegraph/node-exporter:4.5.1@sha256:fa8e5700b7762fffe0674e944762f44bb787a7e44d97569fe55348260453bf80
           imagePullPolicy: IfNotPresent
           resources:
             limits:

base/monitoring/otel-collector/otel-agent.DaemonSet.yaml (1 addition, 1 deletion)

@@ -26,7 +26,7 @@ spec:
     spec:
       containers:
         - name: otel-agent
-          image: index.docker.io/sourcegraph/opentelemetry-collector:4.5.0@sha256:12f3fc137edea8319ebf574e15e6c27c19fb0b7ca17165973f98c8d8c342ca1d
+          image: index.docker.io/sourcegraph/opentelemetry-collector:4.5.1@sha256:647b6d3f12788eaf0f1708c5307bcd86804748972ad31ef857bbc189cffac342
           command:
             - "/bin/otelcol-sourcegraph"
             - "--config=/etc/otel-agent/config.yaml"

base/monitoring/otel-collector/otel-collector.Deployment.yaml (1 addition, 1 deletion)

@@ -26,7 +26,7 @@ spec:
     spec:
       containers:
         - name: otel-collector
-          image: index.docker.io/sourcegraph/opentelemetry-collector:4.5.0@sha256:12f3fc137edea8319ebf574e15e6c27c19fb0b7ca17165973f98c8d8c342ca1d
+          image: index.docker.io/sourcegraph/opentelemetry-collector:4.5.1@sha256:647b6d3f12788eaf0f1708c5307bcd86804748972ad31ef857bbc189cffac342
           command:
             - "/bin/otelcol-sourcegraph"
             # To use a custom configuration, edit otel-collector.ConfigMap.yaml
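The inline comment points at otel-collector.ConfigMap.yaml for customization. As an illustrative sketch only — the data keys follow the upstream OpenTelemetry Collector config schema, and the ConfigMap name is an assumption, not taken from this commit:

# otel-collector.ConfigMap.yaml sketch (hypothetical names; schema is the
# upstream OpenTelemetry Collector config format, not this repo's defaults):
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector
data:
  config.yaml: |
    receivers:
      otlp:
        protocols:
          grpc: {}
    exporters:
      logging: {} # stdout exporter, useful to verify the pipeline end to end
    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [logging]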

base/monitoring/prometheus/kustomization.yaml (0 additions, 1 deletion)

@@ -4,7 +4,6 @@ resources:
 - prometheus.Deployment.yaml
 - prometheus.PersistentVolumeClaim.yaml
 - prometheus.Service.yaml
-
 # Create ConfigMap with prometheus.yml
 configMapGenerator:
 - name: prometheus

base/monitoring/prometheus/prometheus.Deployment.yaml (1 addition, 1 deletion)

@@ -25,7 +25,7 @@ spec:
     spec:
       containers:
         - name: prometheus
-          image: index.docker.io/sourcegraph/prometheus:4.5.0@sha256:4fe9a5fdee206b1aac9d32afb31ad57e1882394aad9e7e9f719a1b2741afcae5
+          image: index.docker.io/sourcegraph/prometheus:4.5.1@sha256:e27296dc04bec4e4c1cc2d434c0b49d026a60f53b8907d1325ee1de42693abd3
           terminationMessagePolicy: FallbackToLogsOnError
           env:
             - name: SG_NAMESPACE

base/monitoring/prometheus/prometheus.yml (2 additions, 12 deletions)

@@ -2,19 +2,16 @@ global: # Prometheus global config
   # scrape_timeout is set to the global default (10s)
   scrape_interval: 30s # How frequently to scrape targets by default
   evaluation_interval: 30s # How frequently to evaluate rules
-
 alerting: # Alertmanager configuration
   alertmanagers:
     # bundled alertmanager, started by prom-wrapper
     - static_configs:
         - targets: ["127.0.0.1:9093"]
       path_prefix: /alertmanager
-      # add more alertmanagers here
-
+    # add more alertmanagers here
 rule_files: # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
   - "/sg_config_prometheus/*_rules.yml"
   - "/sg_prometheus_add_ons/*_rules.yml"
-
 scrape_configs: # Configure targets to scrape
   # Scrape prometheus itself for metrics.
   - job_name: "builtin-prometheus"
@@ -25,7 +22,6 @@ scrape_configs: # Configure targets to scrape
     metrics_path: /alertmanager/metrics
     static_configs:
       - targets: ["127.0.0.1:9093"]
-
 #------------------------------------------------------------------------------
 # cAdvisor
 #------------------------------------------------------------------------------
@@ -47,16 +43,11 @@ scrape_configs: # Configure targets to scrape
       - source_labels: [container_label_io_kubernetes_pod_namespace]
         regex: kube-system
         action: drop
-      - source_labels:
-          [
-            container_label_io_kubernetes_container_name,
-            container_label_io_kubernetes_pod_name,
-          ]
+      - source_labels: [container_label_io_kubernetes_container_name, container_label_io_kubernetes_pod_name]
         regex: (.+)
         action: replace
         target_label: name
         separator: "-"
-
 #------------------------------------------------------------------------------
 # Sourcegraph Service Discovery with DNS-SRV records
 # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
@@ -126,7 +117,6 @@ scrape_configs: # Configure targets to scrape
       - source_labels: [container_label_io_kubernetes_pod_namespace]
         target_label: ns
         action: replace
-
 #------------------------------------------------------------------------------
 # Sourcegraph Service Discovery with statics targets
 #------------------------------------------------------------------------------
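The collapsed relabel rule in the third hunk behaves exactly like the multi-line form it replaces; only the formatting changed. A worked example of what the rule does, with illustrative label values:

# Given a cAdvisor series labeled:
#   container_label_io_kubernetes_container_name="grafana"
#   container_label_io_kubernetes_pod_name="grafana-0"
# the rule joins the two source label values with separator "-", matches the
# result against regex (.+), and writes the match to target_label, so the
# series gains:
#   name="grafana-grafana-0"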

base/monitoring/prometheus/rbac/kustomization.yaml (0 additions, 1 deletion)

@@ -4,7 +4,6 @@ resources:
 - prometheus.ClusterRole.yaml
 - prometheus.ClusterRoleBinding.yaml
 - prometheus.ServiceAccount.yaml
-
 # Create ConfigMap with prometheus.yml
 configMapGenerator:
 - name: prometheus-rbac
