
Commit 12dcbb3

Chore: Format code using Ruff
1 parent 403a922 commit 12dcbb3

14 files changed: +313 / -138 lines changed


Makefile

Lines changed: 3 additions & 4 deletions
@@ -14,8 +14,7 @@ $(eval pytest := $(venvpath)/bin/pytest)
 $(eval bumpversion := $(venvpath)/bin/bumpversion)
 $(eval twine := $(venvpath)/bin/twine)
 $(eval sphinx := $(venvpath)/bin/sphinx-build)
-$(eval black := $(venvpath)/bin/black)
-$(eval isort := $(venvpath)/bin/isort)
+$(eval ruff := $(venvpath)/bin/ruff)


 # Setup Python virtualenv
@@ -54,8 +53,8 @@ test-coverage: install-tests
 # Formatting
 # ----------
 format: install-releasetools
-	$(isort) grafana_wtf test
-	$(black) .
+	$(ruff) format
+	$(ruff) check --fix --ignore=ERA --ignore=F401 --ignore=F841 --ignore=T20 --ignore=ERA001 .


 # -------

grafana_wtf/commands.py

Lines changed: 8 additions & 4 deletions
@@ -180,7 +180,7 @@ def run():
         export CACHE_TTL=infinite
         grafana-wtf find geohash

-    """
+    """  # noqa: E501

     # Parse command line arguments
     options = normalize_options(docopt(run.__doc__, version=f"{__appname__} {__version__}"))
@@ -225,7 +225,8 @@ def run():
     # Sanity checks
     if grafana_url is None:
         raise DocoptExit(
-            'No Grafana URL given. Please use "--grafana-url" option or environment variable "GRAFANA_URL".'
+            'No Grafana URL given. Please use "--grafana-url" option '
+            'or environment variable "GRAFANA_URL".'
         )

     log.info(f"Grafana location: {grafana_url}")
@@ -273,7 +274,8 @@ def run():
     # Sanity checks.
     if output_format.startswith("tab") and options.sql:
         raise DocoptExit(
-            f"Options --format={output_format} and --sql can not be used together, only data output is supported."
+            f"Options --format={output_format} and --sql can not be used together, "
+            f"only data output is supported."
         )

     entries = engine.log(dashboard_uid=options.dashboard_uid)
@@ -319,7 +321,9 @@ def run():
         output_results(output_format, results)

     if options.explore and options.dashboards:
-        results = engine.explore_dashboards(with_data_details=options.data_details, queries_only=options.queries_only)
+        results = engine.explore_dashboards(
+            with_data_details=options.data_details, queries_only=options.queries_only
+        )
         output_results(output_format, results)

     if options.explore and options.permissions:
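Note on the string reflows above: the long messages are split across adjacent string literals inside parentheses. Python concatenates adjacent literals at compile time, so the emitted text is unchanged. A minimal standalone sketch (not part of this commit, the variable name is illustrative):

    message = (
        'No Grafana URL given. Please use "--grafana-url" option '
        'or environment variable "GRAFANA_URL".'
    )
    # Adjacent literals join into one string; the message reads exactly as before.
    assert "option or environment variable" in message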

grafana_wtf/core.py

Lines changed: 67 additions & 34 deletions
@@ -36,9 +36,9 @@
 
 
 class GrafanaEngine:
-
     # Configure a larger HTTP request pool.
-    # TODO: Review the pool settings and eventually adjust according to concurrency level or other parameters.
+    # TODO: Review the pool settings and eventually adjust according
+    # to concurrency level or other parameters.
     # https://urllib3.readthedocs.io/en/latest/advanced-usage.html#customizing-pool-behavior
     # https://laike9m.com/blog/requests-secret-pool_connections-and-pool_maxsize,89/
     session_args = dict(pool_connections=100, pool_maxsize=100, retries=5)
@@ -52,7 +52,9 @@ def __init__(self, grafana_url, grafana_token=None):

         self.concurrency = 5

-        self.grafana = self.grafana_client_factory(self.grafana_url, grafana_token=self.grafana_token)
+        self.grafana = self.grafana_client_factory(
+            self.grafana_url, grafana_token=self.grafana_token
+        )
         self.set_user_agent()
         self.data = GrafanaDataModel()
         self.finder = JsonPathFinder()
@@ -66,14 +68,18 @@ def set_session(self, session):

     def enable_cache(self, expire_after=60, drop_cache=False):
         if expire_after is None:
-            log.info(f"Response cache will never expire (infinite caching)")
+            log.info("Response cache will never expire (infinite caching)")
         elif expire_after == 0:
-            log.info(f"Response cache will expire immediately (expire_after=0)")
+            log.info("Response cache will expire immediately (expire_after=0)")
         else:
             log.info(f"Response cache will expire after {expire_after} seconds")

         session = CachedSession(
-            cache_name=__appname__, expire_after=expire_after, use_cache_dir=True, wal=True, **self.session_args
+            cache_name=__appname__,
+            expire_after=expire_after,
+            use_cache_dir=True,
+            wal=True,
+            **self.session_args,
         )
         self.set_session(session)
         self.set_user_agent()
@@ -86,7 +92,7 @@ def enable_cache(self, expire_after=60, drop_cache=False):
         return self

     def clear_cache(self):
-        log.info(f"Clearing cache")
+        log.info("Clearing cache")
         requests_cache.clear()

     def enable_concurrency(self, concurrency):
@@ -171,7 +177,11 @@ def scan_notifications(self):
         if Version(self.grafana.version) < Version("11"):
             self.data.notifications = self.grafana.notifications.lookup_channels()
         else:
-            warnings.warn("Notification channel scanning support for Grafana 11 is not implemented yet", UserWarning)
+            warnings.warn(
+                "Notification channel scanning support for Grafana 11 is not implemented yet",
+                UserWarning,
+                stacklevel=2,
+            )

     def scan_datasources(self):
         log.info("Scanning datasources")
@@ -185,7 +195,8 @@ def scan_datasources(self):
             if isinstance(ex, GrafanaUnauthorizedError):
                 log.error(
                     self.get_red_message(
-                        "Please use --grafana-token or GRAFANA_TOKEN " "for authenticating with Grafana"
+                        "Please use --grafana-token or GRAFANA_TOKEN "
+                        "for authenticating with Grafana"
                     )
                 )

@@ -207,7 +218,7 @@ def scan_dashboards(self, dashboard_uids=None):

         except GrafanaClientError as ex:
             self.handle_grafana_error(ex)
-            return
+            return None

         if self.progressbar:
             self.start_progressbar(len(self.data.dashboard_list))
@@ -221,7 +232,9 @@ def scan_dashboards(self, dashboard_uids=None):
             self.taqadum.close()

         # Improve determinism by returning stable sort order.
-        self.data.dashboards = munchify(sorted(self.data.dashboards, key=lambda x: x["dashboard"]["uid"]))
+        self.data.dashboards = munchify(
+            sorted(self.data.dashboards, key=lambda x: x["dashboard"]["uid"])
+        )

         return self.data.dashboards

@@ -231,7 +244,9 @@ def handle_grafana_error(self, ex):
         log.error(self.get_red_message(message))
         if isinstance(ex, GrafanaUnauthorizedError):
             log.error(
-                self.get_red_message("Please use --grafana-token or GRAFANA_TOKEN " "for authenticating with Grafana")
+                self.get_red_message(
+                    "Please use --grafana-token or GRAFANA_TOKEN for authenticating with Grafana"
+                )
             )

     def fetch_dashboard(self, dashboard_info):
@@ -270,10 +285,13 @@ async def execute_parallel(self):
         # for response in await asyncio.gather(*tasks):
         #     pass

+    @staticmethod
+    def get_red_message(message):
+        return colored.stylize(message, colored.fg("red") + colored.attr("bold"))
+

 class GrafanaWtf(GrafanaEngine):
     def info(self):
-
         response = OrderedDict(
             grafana=OrderedDict(
                 version=self.version,
@@ -308,7 +326,9 @@ def info(self):

         # Count numbers of panels, annotations and variables for all dashboards.
         try:
-            dashboard_summary = OrderedDict(dashboard_panels=0, dashboard_annotations=0, dashboard_templating=0)
+            dashboard_summary = OrderedDict(
+                dashboard_panels=0, dashboard_annotations=0, dashboard_templating=0
+            )
             for dbdetails in self.dashboard_details():
                 # TODO: Should there any deduplication be applied when counting those entities?
                 dashboard_summary["dashboard_panels"] += len(dbdetails.panels)
@@ -324,7 +344,9 @@ def info(self):
     def build_info(self):
         response = None
         error = None
-        error_template = f"The request to {self.grafana_url.rstrip('/')}/api/frontend/settings failed"
+        error_template = (
+            f"The request to {self.grafana_url.rstrip('/')}/api/frontend/settings failed"
+        )
         try:
             response = self.grafana.client.GET("/frontend/settings")
             if not isinstance(response, dict):
@@ -353,7 +375,9 @@ def dashboard_details(self):
             yield DashboardDetails(dashboard=dashboard)

     def search(self, expression):
-        log.info('Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression))
+        log.info(
+            'Searching Grafana at "{}" for expression "{}"'.format(self.grafana_url, expression)
+        )

         results = Munch(datasources=[], dashboard_list=[], dashboards=[])

@@ -370,7 +394,9 @@ def search(self, expression):
     def replace(self, expression, replacement, dry_run: bool = False):
         if dry_run:
             log.info("Dry-run mode enabled, skipping any actions")
-        log.info(f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"')
+        log.info(
+            f'Replacing "{expression}" by "{replacement}" within Grafana at "{self.grafana_url}"'
+        )
         for dashboard in self.data.dashboards:
             payload_before = json.dumps(dashboard)
             payload_after = payload_before.replace(expression, replacement)
@@ -433,29 +459,27 @@ def search_items(self, expression, items, results):
             if effective_item:
                 results.append(effective_item)

-    @staticmethod
-    def get_red_message(message):
-        return colored.stylize(message, colored.fg("red") + colored.attr("bold"))
-
     def get_dashboard_versions(self, dashboard_id):
         # https://grafana.com/docs/http_api/dashboard_versions/
         get_dashboard_versions_path = "/dashboards/id/%s/versions" % dashboard_id
-        r = self.grafana.dashboard.client.GET(get_dashboard_versions_path)
-        return r
+        return self.grafana.dashboard.client.GET(get_dashboard_versions_path)

     def explore_datasources(self):
         # Prepare indexes, mapping dashboards by uid, datasources by name
         # as well as dashboards to datasources and vice versa.
         ix = Indexer(engine=self)

-        # Compute list of exploration items, associating datasources with the dashboards that use them.
+        # Compute list of exploration items, associating
+        # datasources with the dashboards that use them.
         results_used = []
         results_unused = []
         for datasource in ix.datasources:
             ds_identifier = datasource.get("uid", datasource.get("name"))
             dashboard_uids = ix.datasource_dashboard_index.get(ds_identifier, [])
             dashboards = list(map(ix.dashboard_by_uid.get, dashboard_uids))
-            item = DatasourceExplorationItem(datasource=datasource, used_in=dashboards, grafana_url=self.grafana_url)
+            item = DatasourceExplorationItem(
+                datasource=datasource, used_in=dashboards, grafana_url=self.grafana_url
+            )

             # Format results in a more compact form, using only a subset of all the attributes.
             result = item.format_compact()
@@ -466,16 +490,18 @@ def explore_datasources(self):
             if result not in results_unused:
                 results_unused.append(result)

-        results_used = sorted(results_used, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"])
-        results_unused = sorted(results_unused, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"])
+        results_used = sorted(
+            results_used, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"]
+        )
+        results_unused = sorted(
+            results_unused, key=lambda x: x["datasource"]["name"] or x["datasource"]["uid"]
+        )

-        response = OrderedDict(
+        return OrderedDict(
             used=results_used,
             unused=results_unused,
         )

-        return response
-
     def explore_dashboards(self, with_data_details: bool = False, queries_only: bool = False):
         # Prepare indexes, mapping dashboards by uid, datasources by name
         # as well as dashboards to datasources and vice versa.
@@ -484,7 +510,8 @@ def explore_dashboards(self, with_data_details: bool = False, queries_only: bool
         # Those dashboard names or uids will be ignored.
         ignore_dashboards = ["-- Grafana --", "-- Mixed --", "grafana", "-- Dashboard --"]

-        # Compute list of exploration items, looking for dashboards with missing data sources.
+        # Compute list of exploration items, looking
+        # for dashboards with missing data sources.
         results = []
         for uid in sorted(ix.dashboard_by_uid):
             dashboard = ix.dashboard_by_uid[uid]
@@ -597,13 +624,17 @@ def channels_list_by_uid(self, channel_uid):
         for dashboard in dashboards:
             for panel in dashboard["dashboard"].get("panels", []):
                 if "alert" in panel and panel["alert"]["notifications"]:
-                    related_panels += self.extract_channel_related_information(channel_uid, dashboard, panel)
+                    related_panels += self.extract_channel_related_information(
+                        channel_uid, dashboard, panel
+                    )

                 # Some dashboards have a deeper nested structure
                 elif "panels" in panel:
                     for subpanel in panel["panels"]:
                         if "alert" in subpanel and subpanel["alert"]["notifications"]:
-                            related_panels += self.extract_channel_related_information(channel_uid, dashboard, subpanel)
+                            related_panels += self.extract_channel_related_information(
+                                channel_uid, dashboard, subpanel
+                            )
         if related_panels:
             channel["related_panels"] = related_panels
         return channel
@@ -613,7 +644,9 @@ def extract_channel_related_information(channel_uid, dashboard, panel):
         related_information = []
         for notification in panel["alert"]["notifications"]:
             if "uid" in notification and notification["uid"] == channel_uid:
-                related_information.append({"dashboard": dashboard["dashboard"]["title"], "panel": panel["title"]})
+                related_information.append(
+                    {"dashboard": dashboard["dashboard"]["title"], "panel": panel["title"]}
+                )
         return related_information
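Note on the warnings.warn() changes above: besides reflowing the calls, this pass adds stacklevel=2, which matches Ruff's flake8-bugbear rule B028. With the default stacklevel=1 the warning is attributed to the line inside the library; stacklevel=2 attributes it to the caller. A minimal sketch, independent of grafana-wtf (the function name is illustrative):

    import warnings

    def scan_notifications_stub():
        # stacklevel=2 makes the reported location the caller of this function,
        # not this line inside the library code.
        warnings.warn("not implemented yet", UserWarning, stacklevel=2)

    scan_notifications_stub()  # the warning is reported against this call site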

grafana_wtf/model.py

Lines changed: 9 additions & 2 deletions
@@ -120,7 +120,9 @@ def _format_data_node_compact(item: Dict) -> Dict:

     def queries_only(self):
         """
-        Return a representation of data details information, only where query expressions are present.
+        Return a representation of data details information.
+
+        Only where query expressions are present.
         """
         # All attributes containing query-likes.
         attributes_query_likes = ["expr", "jql", "query", "rawSql", "target"]
@@ -145,7 +147,11 @@ def transform(section):
                     continue
                 # Unnest items with nested "query" slot.
                 for slot in ["query", "target"]:
-                    if slot in new_item and isinstance(new_item[slot], dict) and "query" in new_item[slot]:
+                    if (
+                        slot in new_item
+                        and isinstance(new_item[slot], dict)
+                        and "query" in new_item[slot]
+                    ):
                         new_item["query"] = new_item[slot]["query"]
                 new_items.append(new_item)
             return new_items
@@ -196,6 +202,7 @@ def validate(cls, data: dict):
                 {data}
                 """.strip(),
                 UserWarning,
+                stacklevel=2,
             )
             del data["datasource"]

grafana_wtf/report/data.py

Lines changed: 7 additions & 3 deletions
@@ -28,7 +28,7 @@ def serialize_results(output_format: str, results: List):


 class DataSearchReport(TabularSearchReport):
-    def __init__(self, grafana_url, verbose=False, format=None):
+    def __init__(self, grafana_url, verbose=False, format=None):  # noqa: A002
         self.grafana_url = grafana_url
         self.verbose = verbose
         self.format = format
@@ -42,7 +42,11 @@ def display(self, expression, result):
                 grafana=self.grafana_url,
                 expression=expression,
             ),
-            datasources=self.get_output_items("Datasource", result.datasources, self.compute_url_datasource),
-            dashboards=self.get_output_items("Dashboard", result.dashboards, self.compute_url_dashboard),
+            datasources=self.get_output_items(
+                "Datasource", result.datasources, self.compute_url_datasource
+            ),
+            dashboards=self.get_output_items(
+                "Dashboard", result.dashboards, self.compute_url_dashboard
+            ),
         )
         output_results(self.format, output)
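Note on the "# noqa: A002" marker above: Ruff's flake8-builtins rule A002 flags function arguments that shadow a Python builtin; here the existing "format" parameter shadows the built-in format(), and the suppression keeps the public signature unchanged. A standalone illustration, not taken from the repository (the render() function is hypothetical):

    def render(value, format=None):  # noqa: A002 -- parameter shadows the builtin format()
        # Inside this function, "format" refers to the parameter, so the
        # builtin format() is not reachable under that name here.
        if format == "hex":
            return hex(value)
        return str(value)

    print(render(255, format="hex"))  # -> 0xff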
