diff --git a/macros/edge-services/edge-services-lb-benefits.mdx b/macros/edge-services/edge-services-lb-benefits.mdx
index ad2a0dcacc..da90c9a73a 100644
--- a/macros/edge-services/edge-services-lb-benefits.mdx
+++ b/macros/edge-services/edge-services-lb-benefits.mdx
@@ -2,11 +2,9 @@
macro: edge-services-lb-benefits
---
-import EdgeServicesPipelineDiag from './assets/scaleway-edge-services-pipeline-diag.webp'
-
Creating an Edge Services pipeline for your Load Balancer helps to reduce load on your Load Balancer's backend servers. The origin configuration you define is used by Edge Services to connect to your Load Balancer and request content, which is then stored in the cache. Then, when your Load Balancer origin is accessed via its customizable Edge Services endpoint, the requested content is served from the cache (if present), without the need to fetch this content via the Load Balancer and its backend servers.
-
+
Edge Services lets you:
diff --git a/pages/cockpit/how-to/access-grafana-and-managed-dashboards.mdx b/pages/cockpit/how-to/access-grafana-and-managed-dashboards.mdx
index 8019f105e4..b9dbdf45c9 100644
--- a/pages/cockpit/how-to/access-grafana-and-managed-dashboards.mdx
+++ b/pages/cockpit/how-to/access-grafana-and-managed-dashboards.mdx
@@ -3,7 +3,7 @@ title: How to access Grafana and your preconfigured dashboards
description: Learn to access your Grafana preconfigured dashboards, including steps to log in, navigate, and view your resources.
tags: observability cockpit grafana preconfigured-dashboard
dates:
- validation: 2025-07-22
+ validation: 2025-08-20
posted: 2022-10-31
---
import Requirements from '@macros/iam/requirements.mdx'
@@ -24,7 +24,7 @@ This page shows you how to access [preconfigured dashboards](/cockpit/concepts/#
2. Click **Open dashboards** to open your preconfigured dashboards in Grafana. You are redirected to the Grafana website.
3. Enter your [Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/).
4. Click **Log in**. The Grafana overview page displays.
-5. Click the **Home** icon at the top left of your screen.
+5. Click the Grafana icon in the top left corner of your screen to open the menu.
6. Click **Dashboards**. The Scaleway folder appears, with a list of all available preconfigured dashboards for Scaleway resources.
7. Click the dashboard you want to view.
diff --git a/pages/cockpit/how-to/assets/scaleway-advanced-options.webp b/pages/cockpit/how-to/assets/scaleway-advanced-options.webp
new file mode 100644
index 0000000000..533d76992e
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-advanced-options.webp differ
diff --git a/pages/cockpit/how-to/assets/scaleway-alert-firing.webp b/pages/cockpit/how-to/assets/scaleway-alert-firing.webp
new file mode 100644
index 0000000000..e8005cd255
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-alert-firing.webp differ
diff --git a/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp b/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp
deleted file mode 100644
index f714c6c705..0000000000
Binary files a/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp and /dev/null differ
diff --git a/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp b/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp
new file mode 100644
index 0000000000..9c690b0daf
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp differ
diff --git a/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp b/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp
new file mode 100644
index 0000000000..95939673fe
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp differ
diff --git a/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp b/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp
new file mode 100644
index 0000000000..bb25b1b308
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp differ
diff --git a/pages/cockpit/how-to/assets/scaleway-metric-selection.webp b/pages/cockpit/how-to/assets/scaleway-metric-selection.webp
deleted file mode 100644
index be358502aa..0000000000
Binary files a/pages/cockpit/how-to/assets/scaleway-metric-selection.webp and /dev/null differ
diff --git a/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp b/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp
deleted file mode 100644
index 5ea9a88d5b..0000000000
Binary files a/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp and /dev/null differ
diff --git a/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp b/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp
new file mode 100644
index 0000000000..9a04e03b02
Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp differ
diff --git a/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx b/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx
index 800b8b6c75..c1f6da2faa 100644
--- a/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx
+++ b/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx
@@ -2,47 +2,55 @@
title: How to configure alerts for Scaleway resources in Grafana
description: Learn how to configure alerts for Scaleway resources in Grafana. Follow the steps to create alert rules, define conditions, and set up notifications for your monitored resources.
dates:
- validation: 2025-05-12
+ validation: 2025-08-20
posted: 2023-11-06
---
import Requirements from '@macros/iam/requirements.mdx'
-import image from './assets/scaleway-switch-to-managed-alerts-button.webp'
-import image2 from './assets/scaleway-metrics-browser.webp'
-import image3 from './assets/scaleway-metrics-displayed.webp'
-import image4 from './assets/scaleway-metric-selection.webp'
-import image5 from './assets/scaleway-metrics-browser.webp'
-import image6 from './assets/scaleway-metrics-displayed.webp'
-import image7 from './assets/scaleway-metrics-browser.webp'
-import image8 from './assets/scaleway-metrics-displayed.webp'
-import image9 from './assets/scaleway-metrics-browser.webp'
-import image10 from './assets/scaleway-metrics-displayed.webp'
-import image11 from './assets/scaleway-alerts-firing.webp'
+import AdvancedOptionsGrafana from './assets/scaleway-advanced-options.webp'
+import DataSourceManaged from './assets/scaleway-datasource-managed.webp'
+import MetricsExplorer from './assets/scaleway-metrics-explorer.webp'
+import FiringAlertTabs from './assets/scaleway-firing-alert-tabs.webp'
+import AlertsFiringGrafana from './assets/scaleway-alert-firing.webp'
+import AlertsViaGrafanaUI from './assets/scaleway-alerts-via-grafana-ui.webp'
-Cockpit does not support Grafana-managed alerting. It integrates with Grafana to visualize metrics, but alerts are managed through the Scaleway alert manager. You should use Grafana only to define alert rules, not to evaluate or receive alert notifications. Once the conditions of your alert rule are met, the Scaleway alert manager evaluates the rule and sends a notification to the contacts you have configured in the Scaleway console or in Grafana.
+Cockpit does not support the Grafana alert manager or Grafana-managed alert rules. You should use the Grafana interface only to define alert rules, not to evaluate or receive alert notifications. Alerting rules are evaluated at the data source level, and notifications are handled and sent out by the Scaleway alert manager only.
+Once the conditions of your alert rule are met, the rule evaluation engine of your data source forwards the firing alert to the Scaleway alert manager, which then sends a notification to the contacts you have configured in the Scaleway console or in Grafana.
+
+You can also create alerting rules on your custom data sources.
This page shows you how to create alert rules in Grafana for monitoring Scaleway resources integrated with Cockpit, such as Instances, Object Storage, and Kubernetes. These alerts rely on Scaleway-provided metrics, which are preconfigured and available in the **Metrics browser** drop-down when using the **Scaleway Metrics data source** in the Grafana interface. This page explains how to use the `Scaleway Metrics` data source, interpret metrics, set alert conditions, and activate alerts.
- - A Scaleway account logged into the [console](https://console.scaleway.com)
- - [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization
- - Scaleway resources you can monitor
- - [Created Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/) with the **Editor** role
- - [Enabled](/cockpit/how-to/enable-alert-manager/) the Scaleway alert manager
- - [Created](/cockpit/how-to/add-contact-points/) a contact in the Scaleway console or a contact point in Grafana (with the `Scaleway Alerting` alert manager of the same region as your `Scaleway Metrics` data source), otherwise alerts will not be delivered
+- A Scaleway account logged into the [console](https://console.scaleway.com)
+- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization
+- Scaleway resources you can monitor
+- [Created Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/) with the **Editor** role
+- [Enabled](/cockpit/how-to/enable-alert-manager/) the Scaleway alert manager in the same region as the resources you want to be alerted for
+- [Added](/cockpit/how-to/add-contact-points/) contacts in the Scaleway console or contact points in Grafana (with the `Scaleway Alerting` alert manager of the same region as your `Scaleway Metrics` data source); otherwise, alerts will not be delivered
-## Switch to data source managed alert rules
+## Switch to the data source-managed tab
-Data source managed alert rules allow you to configure alerts managed by the data source of your choice, instead of using Grafana's managed alerting system which is not supported by Cockpit.
+Data source-managed alert rules allow you to configure alerts managed by the data source of your choice, instead of using Grafana's managed alerting system, **which is not supported by Cockpit**.
1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials.
-2. Click the **Toggle menu** then click **Alerting**.
-3. Click **Alert rules** and **+ New alert rule**.
-4. In the **Define query and alert condition** section, scroll to the **Grafana-managed alert rule** information banner and click **Switch to data source-managed alert rule**. This step is **mandatory** because Cockpit does not support Grafana’s built-in alerting system, but only alerts configured and evaluated by the data source itself. You are redirected to the alert creation process.
-
+2. Click the Grafana icon in the top left corner of your screen to open the menu.
+3. Click the arrow next to **Alerting** on the left-side menu, then click **Alert rules**.
+4. Click **+ New alert rule**.
+5. Enter a name for your alert.
+6. In the **Define query and alert condition** section, toggle **Advanced options**.
+
+7. Select the **Scaleway Metrics** data source in the drop-down.
+8. In the **Rule type** subsection, click the **Data source-managed** tab.
+
+
+
+ This step is **mandatory** because Cockpit does not support Grafana's built-in alerting system; it only supports alerts configured and evaluated by the data source itself.
+
## Define your metric and alert conditions
@@ -52,129 +60,118 @@ Switch between the tabs below to create alerts for a Scaleway Instance, an Objec
The steps below explain how to create the metric selection and configure an alert condition that triggers when **your Instance consumes more than 10% of a single CPU core over the past 5 minutes.**
- 1. Type a name for your alert. For example, `alert-for-high-cpu-usage`.
- 2. Select the **Scaleway Metrics** data source.
- 3. Click the **Metrics browser** drop-down.
-
-
- 4. Select the metric you want to configure an alert for. For example, `instance_server_cpu_seconds_total`.
+ 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values of the labels used in the query (for example, `resource_id`) match those of the target resource.
+ ```bash
+ rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}[5m]) > 0.1
+ ```
The `instance_server_cpu_seconds_total` metric records how many seconds of CPU time your Instance has used in total, which makes it helpful for detecting unexpected CPU usage spikes. Because it is a counter of CPU seconds, `rate()` over a 5-minute window returns the average number of CPU seconds consumed per second, so a value above `0.1` corresponds to more than 10% of a single core.
- 5. Select the appropriate labels to filter your metric and target specific resources.
- 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector.
-
- 7. Click **Use query** to validate your metric selection.
- 8. In the query field next to the **Metrics browser** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id` and `resource_name`) correspond to those of the target resource.
- ```bash
- rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",resource_name="name-of-your-resource"}[5m]) > 0.1
- ```
- 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert.
- 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations.
- 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
+ 2. In the **Set alert evaluation behavior** section, specify how long the condition must be met before triggering the alert.
+ 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert rules. Rules that share the same group use the same configuration, including the evaluation interval, which determines how often the rule is evaluated (every minute by default). You can modify this interval later in the group settings.
+
+ The evaluation interval is different from the pending period set in step 2. The evaluation interval controls how often the rule is checked, while the pending period defines how long the condition must be continuously met before the alert fires. For example, with a one-minute evaluation interval and a five-minute pending period, the rule is checked every minute and only fires after five consecutive checks meet the condition.
+
+ 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears.
+ 5. Enter a label name and a value, then click **Save**. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page.
- For example, if an alert has the label `team = instances-team`, you are telling Grafana to send a notification to the Instances team when your alert `alert-for-high-cpu-usage` gets triggered. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy).
+ For example, if your alert named `alert-for-high-cpu-usage` has the label `team = instances-team`, you are telling Grafana to send a notification to the Instances team when the alert gets triggered. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy).
- 12. Click **Save rule** in the top right corner of your screen to save and activate your alert.
- 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact](/cockpit/concepts/#contacts).
+ 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert.
+ 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contacts](/cockpit/concepts/#contact-points).
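+
+ As an illustration of step 7, you can temporarily replace the query from step 1 with a variant like the sketch below. The `0.001` threshold (0.1% of a core) is an arbitrary value, low enough that normal background activity should make the rule fire; the `resource_id` placeholder is still yours to replace.
+ ```bash
+ rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}[5m]) > 0.001
+ ```
+ Once the test notification arrives, restore the original `0.1` threshold and save the rule again.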
The steps below explain how to create the metric selection and configure an alert condition that triggers when **the object count in your bucket exceeds a specific threshold**.
- 1. Type a name for your alert.
- 2. Select the **Scaleway Metrics** data source.
- 3. Click the **Metrics browser** drop-down.
-
-
- 4. Select the metric you want to configure an alert for. For example, `object_storage_bucket_objects_total`.
-
- The `object_storage_bucket_objects_total` metric indicates the total number of objects stored in a given Object Storage bucket. It is useful to monitor and control object growth in your bucket and avoid hitting thresholds.
-
- 5. Select the appropriate labels to filter your metric and target specific resources.
- 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector.
- 7. Click **Use query** to validate your metric selection. Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps.
- 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id` and `region`) correspond to those of the target resource.
+ 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values of the labels used in the query (for example, `resource_id` and `region`) match those of the target resource.
```bash
object_storage_bucket_objects_total{region="fr-par", resource_id="my-bucket"} > 2000
```
- 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert.
- 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations.
- 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
+
+ The `object_storage_bucket_objects_total` metric indicates the total number of objects stored in a given Object Storage bucket. It is useful to monitor and control object growth in your bucket and avoid hitting thresholds.
+
+ 2. In the **Set alert evaluation behavior** section, specify how long the condition must be met before triggering the alert.
+ 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert rules. Rules that share the same group use the same configuration, including the evaluation interval, which determines how often the rule is evaluated (every minute by default). You can modify this interval later in the group settings.
+
+ The evaluation interval is different from the pending period set in step 2. The evaluation interval controls how often the rule is checked, while the pending period defines how long the condition must be continuously met before the alert fires.
+
+ 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears.
+ 5. Enter a label name and a value, then click **Save**. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page.
For example, if an alert has the label `team = object-storage-team`, you are telling Grafana to send a notification to the Object Storage team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy).
- 12. Click **Save rule** in the top right corner of your screen to save and activate your alert.
- 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact](/cockpit/concepts/#contacts).
+ 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert.
+ 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contacts](/cockpit/concepts/#contact-points).
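+
+ As a variation on the query from step 1, the sketch below assumes you want a single rule covering every bucket in the region rather than one named bucket: it sums the same metric across all resources, keeping the arbitrary `2000` threshold from the example above.
+ ```bash
+ sum by (region) (object_storage_bucket_objects_total{region="fr-par"}) > 2000
+ ```
+ Because the `resource_id` label is aggregated away, this rule fires on the combined object count rather than on any individual bucket.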
The steps below explain how to create the metric selection and configure an alert condition that triggers when **no new pod activity occurs, which could mean your cluster is stuck or unresponsive.**
- 1. Type a name for your alert.
- 2. Select the **Scaleway Metrics** data source.
- 3. Click the **Metrics browser** drop-down.
-
-
- 4. Select the metric you want to configure an alert for. For example, `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total`.
-
- The `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total` metric represents the total number of pods currently running across all nodes in your Kubernetes cluster. It is helpful to monitor current pod consumption per node pool or cluster, and help track resource saturation or unexpected workload spikes.
-
- 5. Select the appropriate labels to filter your metric and target specific resources.
- 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector.
- 7. Click **Use query** to validate your metric selection. Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps.
- 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource.
+ 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values of the labels used in the query (for example, `resource_name`) match those of the target resource.
```bash
rate(kubernetes_cluster_k8s_shoot_nodes_pods_usage_total{resource_name="k8s-par-quizzical-chatelet"}[15m]) == 0
```
- 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert.
- 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations.
- 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
+
+ The `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total` metric represents the total number of pods currently running across all nodes in your Kubernetes cluster. It is helpful to monitor current pod consumption per node pool or cluster, and to track resource saturation or unexpected workload spikes.
+
+ 2. In the **Set alert evaluation behavior** section, specify how long the condition must be met before triggering the alert.
+ 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert rules. Rules that share the same group use the same configuration, including the evaluation interval, which determines how often the rule is evaluated (every minute by default). You can modify this interval later in the group settings.
+
+ The evaluation interval is different from the pending period set in step 2. The evaluation interval controls how often the rule is checked, while the pending period defines how long the condition must be continuously met before the alert fires.
+
+ 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears.
+ 5. Enter a label name and a value, then click **Save**. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page.
For example, if an alert has the label `team = kubernetes-team`, you are telling Grafana to send a notification to the Kubernetes team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy).
- 12. Click **Save rule** in the top right corner of your screen to save and activate your alert.
- 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact](/cockpit/concepts/#contacts).
+ 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert.
+ 7. Optionally, check that your configuration works by temporarily loosening the condition, for example by changing `== 0` to `>= 0`, which is always true. This will trigger the alert and notify your [contacts](/cockpit/concepts/#contact-points).
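+
+ As a complementary sketch, the same metric can also alert on growth rather than inactivity. The `100` threshold below is an arbitrary illustration value, not a Kubernetes limit; adjust it to the pod capacity of your own cluster.
+ ```bash
+ kubernetes_cluster_k8s_shoot_nodes_pods_usage_total{resource_name="k8s-par-quizzical-chatelet"} > 100
+ ```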
The steps below explain how to create the metric selection and configure an alert condition that triggers when **no logs are stored for 5 minutes, which may indicate your app or system is broken**.
- 1. Type a name for your alert.
- 2. Select the **Scaleway Metrics** data source.
- 3. Click the **Metrics browser** drop-down.
-
-
- 4. Select the metric you want to configure an alert for. For example, `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m`.
-
- The `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m` metric represents the number of chunks (log storage blocks) that have been written over the last 5 minutes for a specific resource. It is useful to monitor log ingestion activity and detect issues such as a crash of the logging agent, or your application not producing logs.
-
- 5. Select the appropriate labels to filter your metric and target specific resources.
- 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector.
- 7. Click **Use query** to validate your metric selection. Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps.
- 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource.
+ 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values of the labels used in the query (for example, `resource_id`) match those of the target resource.
```bash
observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"} == 0
```
- 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert.
- 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations.
- 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
+
+ The `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m` metric represents the number of chunks (log storage blocks) that have been written over the last 5 minutes for a specific resource. It is useful to monitor log ingestion activity and detect issues such as a crash of the logging agent, or your application not producing logs.
+
+ 2. In the **Set alert evaluation behavior** section, specify how long the condition must be met before triggering the alert.
+ 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert rules. Rules that share the same group use the same configuration, including the evaluation interval, which determines how often the rule is evaluated (every minute by default). You can modify this interval later in the group settings.
+
+ The evaluation interval is different from the pending period set in step 2. The evaluation interval controls how often the rule is checked, while the pending period defines how long the condition must be continuously met before the alert fires.
+
+ 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears.
+ 5. Enter a label name and a value, then click **Save**. You can skip this step if you want your alerts to be sent to the contacts you may already have created in the Scaleway console.
In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page.
For example, if an alert has the label `team = cockpit-team`, you are telling Grafana to send a notification to the Cockpit team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy).
- 12. Click **Save rule** in the top right corner of your screen to save and activate your alert.
- 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact](/cockpit/concepts/#contacts).
+ 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert.
+ 7. Optionally, check that your configuration works by temporarily loosening the condition, for example by changing `== 0` to `>= 0`, which is always true. This will trigger the alert and notify your [contacts](/cockpit/concepts/#contact-points).
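+
+ As a broader variant of the query from step 1, the sketch below drops the `resource_id` filter and sums the same metric across all resources, so it fires only when log ingestion stops everywhere at once.
+ ```bash
+ sum(observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m) == 0
+ ```
+ As with any PromQL equality check, the rule does not fire if the metric returns no series at all.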
-You can view your firing alerts in the **Alert rules** section of your Grafana (**Home** > **Alerting** > **Alert rules**).
+ **You can configure a maximum of 10 alerts** for the `Scaleway Metrics` data source.
+
+
+ You can also build your PromQL expressions and find the right metrics in the **Grafana-managed** tab, using the metrics explorer (book icon) and the **+ Operations** button. However, remember to switch back to the **Data source-managed** tab once you are done building them, to proceed with the final configuration steps.
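+
+ For example, here is the kind of expression you might assemble there; an arbitrary sketch combining an aggregation, a rate, and a threshold on the CPU metric used earlier:
+ ```bash
+ avg by (resource_id) (rate(instance_server_cpu_seconds_total[5m])) > 0.1
+ ```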
+
+
-
+## View firing alerts
-
- You can configure up to a **maximum of 10 alerts** for the `Scaleway Metrics` data source.
-
+ 1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials.
+ 2. Click the Grafana icon in the top left corner of your screen to open the menu.
+ 3. Click the arrow next to **Alerting** on the left-side menu, then click **Alert rules**.
+ 4. Click the **Firing** tab under the **State** section to filter for firing rules.
+ 5. Click the **Alert** tab under the **Rule type** section to filter for alerting rules.
+
+ Your firing alerts should display.
+
Find out how to send Cockpit's alert notifications to Slack using a webhook URL in our [dedicated documentation](/tutorials/configure-slack-alerting/).
diff --git a/pages/cockpit/how-to/send-logs-from-k8s-to-cockpit.mdx b/pages/cockpit/how-to/send-logs-from-k8s-to-cockpit.mdx
index 6e5e674a7d..4f1ae0ed13 100644
--- a/pages/cockpit/how-to/send-logs-from-k8s-to-cockpit.mdx
+++ b/pages/cockpit/how-to/send-logs-from-k8s-to-cockpit.mdx
@@ -3,7 +3,7 @@ title: How to send logs from your Kubernetes cluster to your Cockpit
description: Learn how to send your pod logs to your Cockpit using Scaleway's comprehensive guide. This tutorial covers sending Kubernetes pods logs to Scaleway's Cockpit for centralized monitoring and analysis using Grafana, ensuring efficient monitoring and log analysis in your infrastructure.
tags: kubernetes cockpit logs observability monitoring cluster
dates:
- validation: 2025-07-31
+ validation: 2025-08-20
posted: 2025-01-20
---
import Requirements from '@macros/iam/requirements.mdx'
@@ -25,7 +25,7 @@ We will use the [k8s-monitoring](https://artifacthub.io/packages/helm/grafana/k8
- [Created](/iam/how-to/create-api-keys/) an API key and retrieved your API secret key
- - Sending logs for Scaleway resources or personal data using an external path is a billable feature. In addition, any data that you push yourself is billed, even if you send data from Scaleway products. Refer to the [product pricing](https://www.scaleway.com/en/pricing/managed-services/#cockpit) page for more information.
+ Sending logs for Scaleway resources or personal data using an external path is a billable feature. In addition, any data that you push yourself is billed, even if you send data from Scaleway products. Refer to the [product pricing](https://www.scaleway.com/en/pricing/?tags=available,managedservices-observability-cockpit) page for more information.
## Configure the Helm chart
@@ -75,8 +75,8 @@ alloy-singleton:
```
- The template above is for sending logs to your Cockpit. You can also configure it to send metrics to Cockpit using this Helm chart.
- Refer to our dedicated documentation to [send metrics from your cluster to Cockpit](/cockpit/how-to/send-metrics-from-k8s-to-cockpit).
+ The template above is for sending logs to your Cockpit. You can also configure it to send metrics to Cockpit using this Helm chart.
+ Refer to our dedicated documentation to [send metrics from your cluster to Cockpit](/cockpit/how-to/send-metrics-from-k8s-to-cockpit).
## Send Kubernetes logs using Helm chart
@@ -140,7 +140,7 @@ You can also use Terraform/OpenTofu to manage and deploy Helm charts, providing
1. Click **Cockpit** in the **Monitoring** section of the Scaleway [console](https://console.scaleway.com/) side menu. The **Cockpit Overview** page displays.
2. Click **Open dashboards** to open your preconfigured dashboards in Grafana. You are redirected to the Grafana website.
3. Log in to Grafana using your [Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/).
-4. Click the **Home** icon, then click **Explore**.
+4. Click the Grafana icon in the top left corner of your screen to open the menu, then click **Explore**.
5. Select your custom data source in the search drop-down on the upper left corner of your screen.
6. In the **Labels filter** drop-down, select the `cluster` label and in the **Value** drop-down, select your cluster.
7. Optionally, click the **Clock** icon on the top right corner of your screen and filter by time range.
diff --git a/pages/cockpit/how-to/send-metrics-from-k8s-to-cockpit.mdx b/pages/cockpit/how-to/send-metrics-from-k8s-to-cockpit.mdx
index f122423f89..b42410402d 100644
--- a/pages/cockpit/how-to/send-metrics-from-k8s-to-cockpit.mdx
+++ b/pages/cockpit/how-to/send-metrics-from-k8s-to-cockpit.mdx
@@ -3,7 +3,7 @@ title: How to send metrics from your Kubernetes cluster to your Cockpit
description: Learn how to send your pod metrics to your Cockpit using Scaleway's comprehensive guide. This tutorial covers sending Kubernetes pods metrics to Scaleway's Cockpit for centralized monitoring and analysis using Grafana, ensuring efficient monitoring and metrics analysis in your infrastructure.
tags: kubernetes cockpit metrics observability monitoring cluster
dates:
- validation: 2025-07-31
+ validation: 2025-08-20
posted: 2025-01-20
---
import Requirements from '@macros/iam/requirements.mdx'
@@ -26,7 +26,7 @@ We will use the [k8s-monitoring](https://artifacthub.io/packages/helm/grafana/k8
- [Created](/iam/how-to/create-api-keys/) an API key and retrieved your API secret key
- - Sending metrics for Scaleway resources or personal data using an external path is a billable feature. In addition, any data that you push yourself is billed, even if you send data from Scaleway products. Refer to the [product pricing](https://www.scaleway.com/en/pricing/managed-services/#cockpit) page for more information.
+ Sending metrics for Scaleway resources or personal data using an external path is a billable feature. In addition, any data that you push yourself is billed, even if you send data from Scaleway products. Refer to the [product pricing](https://www.scaleway.com/en/pricing/?tags=available,managedservices-observability-cockpit) page for more information.
@@ -168,7 +168,7 @@ Now that your metrics are exported to your Cockpit, you can access and query the
1. Click **Cockpit** in the **Monitoring** section of the Scaleway [console](https://console.scaleway.com/) side menu. The **Cockpit Overview** page displays.
2. Click **Open dashboards** to open your preconfigured dashboards in Grafana. You are redirected to the Grafana website.
3. Log in to Grafana using your [Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/).
-4. Click the **Home** icon, then click **Explore**.
+4. Click the Grafana icon in the top left corner of your screen to open the menu, then click **Explore**.
5. Select your custom data source in the search drop-down on the upper left corner of your screen.
6. In the **Labels filter** drop-down, select the `cluster` label and in the **Value** drop-down, select your cluster.
7. Optionally, click the **Clock** icon on the top right corner of your screen and filter by time range.
diff --git a/pages/cockpit/how-to/send-metrics-with-grafana-alloy.mdx b/pages/cockpit/how-to/send-metrics-with-grafana-alloy.mdx
index ebf264e71d..567e93206e 100644
--- a/pages/cockpit/how-to/send-metrics-with-grafana-alloy.mdx
+++ b/pages/cockpit/how-to/send-metrics-with-grafana-alloy.mdx
@@ -94,7 +94,7 @@ For the sake of this documentation, we are using Grafana Alloy on macOS. Refer t
2. Click **Open dashboards** to open your preconfigured dashboards in Grafana. You are redirected to the Grafana website.
3. Enter your [Grafana credentials](/cockpit/how-to/retrieve-grafana-credentials/).
4. Click **Log in**. The Grafana overview page displays.
-5. Click the **Toggle menu** icon in the top left corner of your screen.
+5. Click the Grafana icon in the top left corner of your screen to open the menu.
6. Click **Dashboards**, then click **New** in the top right corner of your screen. A drop-down displays.
7. Click **Import** to import a dashboard.
8. In the **Import via grafana.com** field, enter `1860`, then click **Load**
diff --git a/tutorials/configure-slack-alerting/index.mdx b/tutorials/configure-slack-alerting/index.mdx
index 61b70313a1..a735b402b5 100644
--- a/tutorials/configure-slack-alerting/index.mdx
+++ b/tutorials/configure-slack-alerting/index.mdx
@@ -5,11 +5,11 @@ products:
- cockpit
tags: cockpit alerts monitoring notifications slack
dates:
- validation: 2025-02-18
+ validation: 2025-08-20
posted: 2025-02-18
validation_frequency: 12
---
-import image from './assets/scaleway-slack-alerts-firing.webp'
+import SlackNotification from './assets/scaleway-slack-alerts-firing.webp'
import Requirements from '@macros/iam/requirements.mdx'
@@ -53,11 +53,11 @@ As **we do not support Grafana managed alerts**, this documentation only shows y
## Creating your Slack integration in Grafana using the Scaleway Alerting alert manager
-1. [Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard.
-2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen.
-3. Click **Alerting**, then click **Contact points**.
+1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials.
+2. Click the Grafana icon in the top left corner of your screen to open the menu.
+3. Click the arrow next to **Alerting** on the left-side menu, then click **Contact points**.
4. Choose the **Scaleway Alerting** alert manager in the drop-down next to the **Choose Alertmanager** field.
-5. Click **+ Add contact point**.
+5. Click **+ Create contact point**.
6. Enter a contact point name. For example, `Slack alerts`.
7. Select **Slack** from the **Integration** drop-down.
8. In the **Channel** field, paste the name of the Slack channel or user to send notifications to.
@@ -65,22 +65,20 @@ As **we do not support Grafana managed alerts**, this documentation only shows y
10. Click **Save contact point**.
- If you have already created contact points, you need to [configure a nested notification policy](#configuring-a-notification-policy) to define which contact point should receive which alert.
+ If you have already created contact points, you need to [configure a child notification policy](#configuring-a-notification-policy) to define which contact point should receive which alert.
## Configuring a notification policy
If you have created multiple contact points in Grafana, the default contact point will receive alerts by default. To make sure your alerts are sent to the desired contact point, you need to define notification policies.
-1. [Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard.
-2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen.
-3. Click **Alerting**, then click **Notification policies**.
-4. Click **+ New nested policy**.
+1. Click **Alerting**, then click **Notification policies**.
+4. Click **+ New child policy**.
5. In the **Label** field, enter the label `alertname`.
6. In the **Operator** field, select **Equals**.
7. In the **Value** field, enter the name of an existing alert you want your contact point to be notified for. For example, `alert-for-high-cpu-usage`.
8. Optionally, click **+Add matcher** to add more labels.
-9. In the **Contact point** field, select the contact point you have configured for Slack.
+9. In the **Contact point** drop-down, select the contact point you have configured for Slack.
10. Click **Save policy**. Your child policy displays. You should now get notified on Slack.
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/tutorials/silence-grafana-alerts/index.mdx b/tutorials/silence-grafana-alerts/index.mdx
index 66632a3f45..2a126f0e71 100644
--- a/tutorials/silence-grafana-alerts/index.mdx
+++ b/tutorials/silence-grafana-alerts/index.mdx
@@ -5,7 +5,7 @@ tags: cockpit alerts mute silence monitoring notifications
products:
- cockpit
dates:
- validation: 2025-06-16
+ validation: 2025-07-18
posted: 2025-06-16
validation_frequency: 12
---
@@ -28,8 +28,8 @@ This page shows you how to temporarily mute (silence) alerts without disabling t
1. [Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard.
-2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen.
-3. Click **Alerting**, then **Silences**. Make sure that **Scaleway Alerting** is selected in the **Choose Alertmanager** drop-down.
+2. Click the **Grafana** icon in the top left corner of your screen to open the menu.
+3. Click the arrow next to **Alerting**, then click **Silences**. Make sure that **Scaleway Alerting** is selected in the **Choose Alertmanager** drop-down.
4. Click **Create silence**.
5. Click the drop-down in the **Silence start and end** section.
6. Click the **Calendar** icon and select a time range during which you want to silence alerts.