feat: grafana modular lib Couchbase #1454

Open · wants to merge 13 commits into base: master
@@ -1,14 +1,14 @@
{
prometheusAlerts+:: {
groups+: [
new(this): {
groups: [
{
name: 'couchbase',
rules: [
{
alert: 'CouchbaseHighCPUUsage',
expr: |||
(sys_cpu_utilization_rate) > %(alertsCriticalCPUUsage)s
||| % $._config,
||| % this.config,
'for': '5m',
labels: {
severity: 'critical',
@@ -19,14 +19,14 @@
(
'{{ printf "%%.0f" $value }} percent CPU usage on node {{$labels.instance}} and on cluster {{$labels.couchbase_cluster}}, ' +
'which is above the threshold of %(alertsCriticalCPUUsage)s.'
) % $._config,
) % this.config,
},
},
{
alert: 'CouchbaseHighMemoryUsage',
expr: |||
100 * (sys_mem_actual_used / clamp_min(sys_mem_actual_used + sys_mem_actual_free, 1)) > %(alertsCriticalMemoryUsage)s
||| % $._config,
||| % this.config,
'for': '5m',
labels: {
severity: 'critical',
@@ -37,14 +37,14 @@
(
'{{ printf "%%.0f" $value }} percent memory usage on node {{$labels.instance}} and on cluster {{$labels.couchbase_cluster}}, ' +
'which is above the threshold of %(alertsCriticalMemoryUsage)s.'
) % $._config,
) % this.config,
},
},
{
alert: 'CouchbaseMemoryEvictionRate',
expr: |||
(kv_ep_num_value_ejects) > %(alertsWarningMemoryEvictionRate)s
||| % $._config,
||| % this.config,
'for': '5m',
labels: {
severity: 'warning',
@@ -55,14 +55,14 @@
(
'{{ printf "%%.0f" $value }} evictions in bucket {{$labels.bucket}}, on node {{$labels.instance}}, and on cluster {{$labels.couchbase_cluster}}, ' +
'which is above the threshold of %(alertsWarningMemoryEvictionRate)s.'
) % $._config,
) % this.config,
},
},
{
alert: 'CouchbaseInvalidRequestVolume',
expr: |||
sum without(instance, job) (rate(n1ql_invalid_requests[2m])) > %(alertsWarningInvalidRequestVolume)s
||| % $._config,
||| % this.config,
'for': '2m',
labels: {
severity: 'warning',
@@ -73,7 +73,7 @@
(
'{{ printf "%%.0f" $value }} invalid requests to {{$labels.couchbase_cluster}}, ' +
'which is above the threshold of %(alertsWarningInvalidRequestVolume)s.'
) % $._config,
) % this.config,
},
},
],
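For readers less familiar with Jsonnet string formatting, the "||| % this.config" interpolation above substitutes the named threshold fields defined in config.libsonnet. With the default thresholds, the first two alert expressions render to the following PromQL (a worked example derived from this diff, not code added by the PR):

# CouchbaseHighCPUUsage, with alertsCriticalCPUUsage: 85
(sys_cpu_utilization_rate) > 85

# CouchbaseHighMemoryUsage, with alertsCriticalMemoryUsage: 85
100 * (sys_mem_actual_used / clamp_min(sys_mem_actual_used + sys_mem_actual_free, 1)) > 85
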
50 changes: 36 additions & 14 deletions couchbase-mixin/config.libsonnet
@@ -1,19 +1,41 @@
{
_config+:: {
enableMultiCluster: false,
couchbaseSelector: if self.enableMultiCluster then 'job=~"$job", cluster=~"$cluster"' else 'job=~"$job"',
multiclusterSelector: 'job=~"$job"',
dashboardTags: ['couchbase-mixin'],
dashboardPeriod: 'now-1h',
dashboardTimezone: 'default',
dashboardRefresh: '1m',
enableMultiCluster: false,
filteringSelector: 'job=~"integrations/couchbase"',
groupLabels: if self.enableMultiCluster then ['job', 'cluster', 'couchbase_cluster'] else ['job', 'couchbase_cluster'],
instanceLabels: ['instance'],
dashboardTags: ['couchbase-mixin'],
uid: 'couchbase',
dashboardNamePrefix: 'Couchbase',

// alerts thresholds
alertsCriticalCPUUsage: 85, // %
alertsCriticalMemoryUsage: 85, // %
alertsWarningMemoryEvictionRate: 10, // count
alertsWarningInvalidRequestVolume: 1000, // count

enableLokiLogs: true,
// additional params
dashboardPeriod: 'now-1h',
dashboardTimezone: 'default',
dashboardRefresh: '1m',

// logs lib related
enableLokiLogs: true,
logLabels: if self.enableMultiCluster then ['job', 'instance', 'cluster', 'level'] else ['job', 'instance', 'level'],
extraLogLabels: [], // Required by logs-lib
logsVolumeGroupBy: 'level',
showLogsVolume: true,

// alerts thresholds
alertsCriticalCPUUsage: 85, // %
alertsCriticalMemoryUsage: 85, // %
alertsWarningMemoryEvictionRate: 10, // count
alertsWarningInvalidRequestVolume: 1000, // count

// metrics source for signals library
metricsSource: 'prometheus',

// expose signals library
local config = self,
signals+: {
cluster: (import './signals/cluster.libsonnet')(config),
node: (import './signals/node.libsonnet')(config),
query: (import './signals/query.libsonnet')(config),
bucket: (import './signals/bucket.libsonnet')(config),
index: (import './signals/index.libsonnet')(config),
},
}
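
The two conditional label lists above depend on enableMultiCluster; evaluating the "if self.enableMultiCluster" expressions in this hunk gives (worked example only, not code added by the PR):

// enableMultiCluster: false (the default)
groupLabels: ['job', 'couchbase_cluster'],
logLabels: ['job', 'instance', 'level'],

// enableMultiCluster: true
groupLabels: ['job', 'cluster', 'couchbase_cluster'],
logLabels: ['job', 'instance', 'cluster', 'level'],
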
155 changes: 155 additions & 0 deletions couchbase-mixin/dashboards.libsonnet
@@ -0,0 +1,155 @@
local g = import './g.libsonnet';
local logslib = import 'logs-lib/logs/main.libsonnet';
{
local root = self,
new(this)::
local prefix = this.config.dashboardNamePrefix;
local links = this.grafana.links;
local tags = this.config.dashboardTags;
local uid = g.util.string.slugify(this.config.uid);
local vars = this.grafana.variables;
local annotations = this.grafana.annotations;
local refresh = this.config.dashboardRefresh;
local period = this.config.dashboardPeriod;
local timezone = this.config.dashboardTimezone;
local panels = this.grafana.panels;

{
'couchbase-bucket-overview.json':
g.dashboard.new(prefix + ' bucket overview')
+ g.dashboard.withPanels(
g.util.grid.wrapPanels(
[
panels.bucket_topBucketsByMemoryUsedPanel { gridPos+: { w: 12 } },
panels.bucket_topBucketsByDiskUsedPanel { gridPos+: { w: 12 } },
panels.bucket_topBucketsByCurrentItemsPanel { gridPos+: { w: 8 } },
panels.bucket_topBucketsByOperationsPanel { gridPos+: { w: 8 } },
panels.bucket_topBucketsByOperationsFailedPanel { gridPos+: { w: 8 } },
panels.bucket_topBucketsByHighPriorityRequestsPanel { gridPos+: { w: 12 } },
panels.bucket_bottomBucketsByCacheHitRatioPanel { gridPos+: { w: 12 } },
panels.bucket_topBucketsByVBucketsCountPanel { gridPos+: { w: 12 } },
panels.bucket_topBucketsByVBucketQueueMemoryPanel { gridPos+: { w: 12 } },
],
)
)
+ root.applyCommon(
vars.multiInstance,
uid + '_couchbase_bucket_overview',
tags,
links { couchbaseBucketOverview+:: {} },
annotations,
timezone,
refresh,
period
),

'couchbase-node-overview.json':
g.dashboard.new(prefix + ' node overview')
+ g.dashboard.withPanels(
g.util.panel.resolveCollapsedFlagOnRows(
g.util.grid.wrapPanels(
[
panels.node_memoryUtilizationPanel { gridPos+: { w: 12 } },
panels.node_cpuUtilizationPanel { gridPos+: { w: 12 } },
panels.node_totalMemoryUsedByServicePanel { gridPos+: { w: 8 } },
panels.node_backupSizePanel { gridPos+: { w: 8 } },
panels.node_currentConnectionsPanel { gridPos+: { w: 8 } },
panels.node_httpResponseCodesPanel { gridPos+: { w: 12 } },
panels.node_httpRequestMethodsPanel { gridPos+: { w: 12 } },
panels.node_queryServiceRequestsPanel { gridPos+: { w: 12 } },
panels.node_queryServiceRequestProcessingTimePanel { gridPos+: { w: 12 } },
panels.node_indexServiceRequestsPanel { gridPos+: { w: 8 } },
panels.node_indexCacheHitRatioPanel { gridPos+: { w: 8 } },
panels.node_averageScanLatencyPanel { gridPos+: { w: 8 } },
]
)
)
)
+ root.applyCommon(
vars.multiInstance,
uid + '_couchbase_node_overview',
tags,
links { couchbaseNodeOverview+:: {} },
annotations,
timezone,
refresh,
period
),

'couchbase-cluster-overview.json':
g.dashboard.new(prefix + ' cluster overview')
+ g.dashboard.withPanels(
g.util.panel.resolveCollapsedFlagOnRows(
g.util.grid.wrapPanels(
[
panels.cluster_topNodesByMemoryUsagePanel { gridPos+: { w: 12 } },
panels.cluster_topNodesByHTTPRequestsPanel { gridPos+: { w: 12 } },
panels.cluster_topNodesByQueryServiceRequestsPanel { gridPos+: { w: 12 } },
panels.cluster_topNodesByIndexAverageScanLatencyPanel { gridPos+: { w: 12 } },
panels.cluster_xdcrReplicationRatePanel { gridPos+: { w: 8 } },
panels.cluster_xdcrDocsReceivedPanel { gridPos+: { w: 8 } },
panels.cluster_localBackupSizePanel { gridPos+: { w: 8 } },
] + this.grafana.rows.clusterOverviewBucket,
)
)
)
+ root.applyCommon(
vars.multiInstance,
uid + '_couchbase_cluster_overview',
tags,
links { couchbaseClusterOverview+:: {} },
annotations,
timezone,
refresh,
period
),

}
+
if this.config.enableLokiLogs then
{
'logs.json':
logslib.new(
prefix + ' logs',
datasourceName=this.grafana.variables.datasources.loki.name,
datasourceRegex=this.grafana.variables.datasources.loki.regex,
filterSelector=this.config.filteringSelector,
labels=this.config.groupLabels + this.config.extraLogLabels,
formatParser=null,
showLogsVolume=this.config.showLogsVolume,
)
{
dashboards+:
{
logs+:
// reference self's already generated variables to keep them, while applying the other common settings via applyCommon
root.applyCommon(super.logs.templating.list, uid=uid + '-logs', tags=tags, links=links { logs+:: {} }, annotations=annotations, timezone=timezone, refresh=refresh, period=period),
},
panels+:
{
// modify log panel
logs+:
g.panel.logs.options.withEnableLogDetails(true)
+ g.panel.logs.options.withShowTime(false)
+ g.panel.logs.options.withWrapLogMessage(false),
},
variables+: {
// add prometheus datasource for annotations processing
toArray+: [
this.grafana.variables.datasources.prometheus { hide: 2 },
],
},
}.dashboards.logs,
}
else {},

applyCommon(vars, uid, tags, links, annotations, timezone, refresh, period):
g.dashboard.withTags(tags)
+ g.dashboard.withUid(uid)
+ g.dashboard.withLinks(std.objectValues(links))
+ g.dashboard.withTimezone(timezone)
+ g.dashboard.withRefresh(refresh)
+ g.dashboard.time.withFrom(period)
+ g.dashboard.withVariables(vars)
+ g.dashboard.withAnnotations(std.objectValues(annotations)),
}
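
The new(this) / this.config pattern used in the alert and dashboard files above implies a shared entrypoint that builds a single "this" object and passes it into each file. That entrypoint is not part of the hunks shown here; the following is a minimal, hedged sketch of how such wiring typically looks in Grafana's modular mixins (the file names and every identifier not visible in this diff are assumptions, not the PR's actual code):

// main.libsonnet, hypothetical sketch only; NOT part of this diff
local alerts = import './alerts.libsonnet';
local config = import './config.libsonnet';
local dashboards = import './dashboards.libsonnet';

{
  new(): {
    local this = self,
    // the _config block from config.libsonnet becomes this.config, which is
    // what alerts.new(this) and dashboards.new(this) interpolate via "% this.config"
    config: config._config,
    grafana: {
      // panels, variables, annotations, links and rows would also be attached here
      dashboards: dashboards.new(this),
    },
    prometheus: {
      alerts: alerts.new(this),
    },
  },
}

With wiring along those lines, the conditional at the end of dashboards.libsonnet emits logs.json only when this.config.enableLokiLogs is true, matching the enableLokiLogs: true default in config.libsonnet.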