@@ -36,9 +36,9 @@
 
 
 class GrafanaEngine:
-
     # Configure a larger HTTP request pool.
-    # TODO: Review the pool settings and eventually adjust according to concurrency level or other parameters.
+    # TODO: Review the pool settings and eventually adjust according
+    # to concurrency level or other parameters.
     # https://urllib3.readthedocs.io/en/latest/advanced-usage.html#customizing-pool-behavior
     # https://laike9m.com/blog/requests-secret-pool_connections-and-pool_maxsize,89/
     session_args = dict(pool_connections=100, pool_maxsize=100, retries=5)
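
Background, not part of the change: these pool settings are the ones `requests` forwards to urllib3 through its `HTTPAdapter`, per the two links above. A minimal stand-alone sketch of the equivalent plain-`requests` setup, assuming the Grafana client passes `session_args` through to an adapter (note that `HTTPAdapter` itself spells the retry argument `max_retries`):

    import requests
    from requests.adapters import HTTPAdapter

    # One pool per host, up to 100 hosts, and up to 100 connections per pool.
    adapter = HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=5)
    session = requests.Session()
    session.mount("http://", adapter)
    session.mount("https://", adapter)
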
@@ -52,7 +52,9 @@ def __init__(self, grafana_url, grafana_token=None):
 
         self.concurrency = 5
 
-        self.grafana = self.grafana_client_factory(self.grafana_url, grafana_token=self.grafana_token)
+        self.grafana = self.grafana_client_factory(
+            self.grafana_url, grafana_token=self.grafana_token
+        )
         self.set_user_agent()
         self.data = GrafanaDataModel()
         self.finder = JsonPathFinder()
@@ -66,14 +68,18 @@ def set_session(self, session):
 
     def enable_cache(self, expire_after=60, drop_cache=False):
         if expire_after is None:
-            log.info(f"Response cache will never expire (infinite caching)")
+            log.info("Response cache will never expire (infinite caching)")
         elif expire_after == 0:
-            log.info(f"Response cache will expire immediately (expire_after=0)")
+            log.info("Response cache will expire immediately (expire_after=0)")
         else:
             log.info(f"Response cache will expire after {expire_after} seconds")
 
         session = CachedSession(
-            cache_name=__appname__, expire_after=expire_after, use_cache_dir=True, wal=True, **self.session_args
+            cache_name=__appname__,
+            expire_after=expire_after,
+            use_cache_dir=True,
+            wal=True,
+            **self.session_args,
         )
         self.set_session(session)
         self.set_user_agent()
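
For readers unfamiliar with requests-cache: `CachedSession` is a drop-in replacement for `requests.Session`, backed here by SQLite. `use_cache_dir=True` places the database in the platform's cache directory and `wal=True` enables SQLite write-ahead logging, as in the constructor call above. A minimal stand-alone sketch with a hypothetical URL:

    from requests_cache import CachedSession

    session = CachedSession("grafana-wtf", expire_after=60, use_cache_dir=True, wal=True)
    session.get("https://grafana.example.org/api/health")
    response = session.get("https://grafana.example.org/api/health")
    print(response.from_cache)  # True when served from the cache within 60 seconds
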
@@ -86,7 +92,7 @@ def enable_cache(self, expire_after=60, drop_cache=False):
         return self
 
     def clear_cache(self):
-        log.info(f"Clearing cache")
+        log.info("Clearing cache")
         requests_cache.clear()
 
     def enable_concurrency(self, concurrency):
@@ -171,7 +177,11 @@ def scan_notifications(self):
         if Version(self.grafana.version) < Version("11"):
             self.data.notifications = self.grafana.notifications.lookup_channels()
         else:
-            warnings.warn("Notification channel scanning support for Grafana 11 is not implemented yet", UserWarning)
+            warnings.warn(
+                "Notification channel scanning support for Grafana 11 is not implemented yet",
+                UserWarning,
+                stacklevel=2,
+            )
 
     def scan_datasources(self):
         log.info("Scanning datasources")
@@ -185,7 +195,8 @@ def scan_datasources(self):
             if isinstance(ex, GrafanaUnauthorizedError):
                 log.error(
                     self.get_red_message(
-                        "Please use --grafana-token or GRAFANA_TOKEN " "for authenticating with Grafana"
+                        "Please use --grafana-token or GRAFANA_TOKEN "
+                        "for authenticating with Grafana"
                     )
                 )
 
@@ -207,7 +218,7 @@ def scan_dashboards(self, dashboard_uids=None):
 
         except GrafanaClientError as ex:
             self.handle_grafana_error(ex)
-            return
+            return None
 
         if self.progressbar:
             self.start_progressbar(len(self.data.dashboard_list))
@@ -221,7 +232,9 @@ def scan_dashboards(self, dashboard_uids=None):
             self.taqadum.close()
 
         # Improve determinism by returning stable sort order.
-        self.data.dashboards = munchify(sorted(self.data.dashboards, key=lambda x: x["dashboard"]["uid"]))
+        self.data.dashboards = munchify(
+            sorted(self.data.dashboards, key=lambda x: x["dashboard"]["uid"])
+        )
 
         return self.data.dashboards
 
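
Side note on the types involved: `munchify` from the munch package converts nested dicts into `Munch` objects with attribute access, and sorting by dashboard uid makes repeated scans return results in a stable order. A quick illustration with a made-up uid:

    from munch import munchify

    data = munchify({"dashboard": {"uid": "abc123", "title": "Demo"}})
    assert data.dashboard.uid == "abc123"  # instead of data["dashboard"]["uid"]
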
@@ -231,7 +244,9 @@ def handle_grafana_error(self, ex):
         log.error(self.get_red_message(message))
         if isinstance(ex, GrafanaUnauthorizedError):
             log.error(
-                self.get_red_message("Please use --grafana-token or GRAFANA_TOKEN " "for authenticating with Grafana")
+                self.get_red_message(
+                    "Please use --grafana-token or GRAFANA_TOKEN for authenticating with Grafana"
+                )
             )
 
     def fetch_dashboard(self, dashboard_info):
@@ -270,10 +285,13 @@ async def execute_parallel(self):
         # for response in await asyncio.gather(*tasks):
         #     pass
 
+    @staticmethod
+    def get_red_message(message):
+        return colored.stylize(message, colored.fg("red") + colored.attr("bold"))
+
 
 class GrafanaWtf(GrafanaEngine):
     def info(self):
-
         response = OrderedDict(
             grafana=OrderedDict(
                 version=self.version,
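
The relocated helper uses the pre-2.x API of the `colored` package, where styles are composed by concatenating the `fg()` and `attr()` escape strings; this matches the calls visible in the diff. Stand-alone:

    import colored

    print(colored.stylize("unauthorized", colored.fg("red") + colored.attr("bold")))
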
@@ -308,7 +326,9 @@ def info(self):
 
         # Count numbers of panels, annotations and variables for all dashboards.
         try:
-            dashboard_summary = OrderedDict(dashboard_panels=0, dashboard_annotations=0, dashboard_templating=0)
+            dashboard_summary = OrderedDict(
+                dashboard_panels=0, dashboard_annotations=0, dashboard_templating=0
+            )
             for dbdetails in self.dashboard_details():
                 # TODO: Should there any deduplication be applied when counting those entities?
                 dashboard_summary["dashboard_panels"] += len(dbdetails.panels)
@@ -324,7 +344,9 @@ def info(self):
     def build_info(self):
         response = None
         error = None
-        error_template = f"The request to {self.grafana_url.rstrip('/')}/api/frontend/settings failed"
+        error_template = (
+            f"The request to {self.grafana_url.rstrip('/')}/api/frontend/settings failed"
+        )
         try:
             response = self.grafana.client.GET("/frontend/settings")
             if not isinstance(response, dict):
@@ -353,7 +375,9 @@ def dashboard_details(self):
             yield DashboardDetails(dashboard=dashboard)
 
     def search(self, expression):
-        log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression))
+        log.info(
+            'Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression)
+        )
 
         results = Munch(datasources=[], dashboard_list=[], dashboards=[])
 
@@ -370,7 +394,9 @@ def search(self, expression):
     def replace(self, expression, replacement, dry_run: bool = False):
         if dry_run:
             log.info("Dry-run mode enabled, skipping any actions")
-        log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"')
+        log.info(
+            f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"'
+        )
         for dashboard in self.data.dashboards:
             payload_before = json.dumps(dashboard)
             payload_after = payload_before.replace(expression, replacement)
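
The replacement strategy visible here works on the serialized dashboard: dump to a JSON string, do a plain substring replacement, then presumably (outside this hunk) parse it back and save. A tiny sketch of the idea with made-up values:

    import json

    dashboard = {"panels": [{"datasource": "prometheus-old"}]}
    payload = json.dumps(dashboard).replace("prometheus-old", "prometheus-new")
    dashboard = json.loads(payload)
    assert dashboard["panels"][0]["datasource"] == "prometheus-new"
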
@@ -433,29 +459,27 @@ def search_items(self, expression, items, results):
             if effective_item:
                 results.append(effective_item)
 
-    @staticmethod
-    def get_red_message(message):
-        return colored.stylize(message, colored.fg("red") + colored.attr("bold"))
-
     def get_dashboard_versions(self, dashboard_id):
         # https://grafana.com/docs/http_api/dashboard_versions/
         get_dashboard_versions_path = "/dashboards/id/%s/versions" % dashboard_id
-        r = self.grafana.dashboard.client.GET(get_dashboard_versions_path)
-        return r
+        return self.grafana.dashboard.client.GET(get_dashboard_versions_path)
 
     def explore_datasources(self):
         # Prepare indexes, mapping dashboards by uid, datasources by name
         # as well as dashboards to datasources and vice versa.
         ix = Indexer(engine=self)
 
-        # Compute list of exploration items, associating datasources with the dashboards that use them.
+        # Compute list of exploration items, associating
+        # datasources with the dashboards that use them.
         results_used = []
         results_unused = []
         for datasource in ix.datasources:
             ds_identifier = datasource.get("uid", datasource.get("name"))
             dashboard_uids = ix.datasource_dashboard_index.get(ds_identifier, [])
             dashboards = list(map(ix.dashboard_by_uid.get, dashboard_uids))
-            item = DatasourceExplorationItem(datasource=datasource, used_in=dashboards, grafana_url=self.grafana_url)
+            item = DatasourceExplorationItem(
+                datasource=datasource, used_in=dashboards, grafana_url=self.grafana_url
+            )
 
             # Format results in a more compact form, using only a subset of all the attributes.
             result = item.format_compact()
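
The versions endpoint is documented at the URL in the comment above; per those docs it returns a list of version entries. A hypothetical usage sketch, assuming a reachable instance and a valid token:

    wtf = GrafanaWtf("https://grafana.example.org", grafana_token="...")
    for version in wtf.get_dashboard_versions(42):
        print(version["version"], version.get("message", ""))
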
@@ -466,16 +490,18 @@ def explore_datasources(self):
             if result not in results_unused:
                 results_unused.append(result)
 
-        results_used = sorted(results_used, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"])
-        results_unused = sorted(results_unused, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"])
+        results_used = sorted(
+            results_used, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"]
+        )
+        results_unused = sorted(
+            results_unused, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"]
+        )
 
-        response = OrderedDict(
+        return OrderedDict(
             used=results_used,
             unused=results_unused,
         )
 
-        return response
-
     def explore_dashboards(self, with_data_details: bool = False, queries_only: bool = False):
         # Prepare indexes, mapping dashboards by uid, datasources by name
         # as well as dashboards to datasources and vice versa.
@@ -484,7 +510,8 @@ def explore_dashboards(self, with_data_details: bool = False, queries_only: bool
         # Those dashboard names or uids will be ignored.
         ignore_dashboards = ["-- Grafana --", "-- Mixed --", "grafana", "-- Dashboard --"]
 
-        # Compute list of exploration items, looking for dashboards with missing data sources.
+        # Compute list of exploration items, looking
+        # for dashboards with missing data sources.
         results = []
         for uid in sorted(ix.dashboard_by_uid):
             dashboard = ix.dashboard_by_uid[uid]
@@ -597,13 +624,17 @@ def channels_list_by_uid(self, channel_uid):
         for dashboard in dashboards:
             for panel in dashboard["dashboard"].get("panels", []):
                 if "alert" in panel and panel["alert"]["notifications"]:
-                    related_panels += self.extract_channel_related_information(channel_uid, dashboard, panel)
+                    related_panels += self.extract_channel_related_information(
+                        channel_uid, dashboard, panel
+                    )
 
                 # Some dashboards have a deeper nested structure
                 elif "panels" in panel:
                     for subpanel in panel["panels"]:
                         if "alert" in subpanel and subpanel["alert"]["notifications"]:
-                            related_panels += self.extract_channel_related_information(channel_uid, dashboard, subpanel)
+                            related_panels += self.extract_channel_related_information(
+                                channel_uid, dashboard, subpanel
+                            )
         if related_panels:
             channel["related_panels"] = related_panels
         return channel
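
For orientation, the traversal above assumes the usual Grafana dashboard JSON shape, where a row panel can nest further panels one level deep. An abridged, hypothetical payload it would match:

    dashboard = {
        "dashboard": {
            "title": "Demo dashboard",
            "panels": [
                {"title": "CPU", "alert": {"notifications": [{"uid": "channel-1"}]}},
                {"title": "Row", "panels": [
                    {"title": "Memory", "alert": {"notifications": [{"uid": "channel-1"}]}},
                ]},
            ],
        }
    }
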
@@ -613,7 +644,9 @@ def extract_channel_related_information(channel_uid, dashboard, panel):
         related_information = []
         for notification in panel["alert"]["notifications"]:
             if "uid" in notification and notification["uid"] == channel_uid:
-                related_information.append({"dashboard": dashboard["dashboard"]["title"], "panel": panel["title"]})
+                related_information.append(
+                    {"dashboard": dashboard["dashboard"]["title"], "panel": panel["title"]}
+                )
         return related_information
 