
Commit 80d3de2

fix(ci): satisfy ruff linting requirements
Parent: 014b548

14 files changed, 85 additions and 52 deletions

agent_pm/clients/pagerduty_client.py

Lines changed: 3 additions & 1 deletion
@@ -17,7 +17,9 @@ def __init__(self) -> None:
     def enabled(self) -> bool:
         return bool(self.routing_key)
 
-    async def trigger_incident(self, summary: str, source: str = "agent-pm", severity: str = "error", **details: Any) -> dict[str, Any]:
+    async def trigger_incident(
+        self, summary: str, source: str = "agent-pm", severity: str = "error", **details: Any
+    ) -> dict[str, Any]:
         if not self.enabled:
             return {"dry_run": True, "summary": summary, "details": details}
2325

agent_pm/connectors/email.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 import asyncio
 import json
 import logging
-from datetime import UTC, datetime
+from datetime import datetime
 from pathlib import Path
 from typing import Any
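Dropping UTC while keeping datetime is the usual fix for ruff's F401 (unused-import): only the name that is never referenced gets removed. A trivial before/after sketch (the stamp helper is hypothetical):

    # before -- ruff reports: F401 `datetime.UTC` imported but unused
    # from datetime import UTC, datetime
    # after -- keep only the names the module actually references
    from datetime import datetime

    def stamp() -> str:  # hypothetical helper; only `datetime` is needed
        return datetime.now().isoformat()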

agent_pm/observability/dashboard.py

Lines changed: 6 additions & 2 deletions
@@ -4,9 +4,9 @@
 
 from dataclasses import dataclass
 
+from agent_pm.settings import settings
 from agent_pm.storage.redis import count_dead_letters
 from agent_pm.storage.tasks import get_task_queue
-from agent_pm.settings import settings
 
 
 @dataclass
@@ -21,4 +21,8 @@ async def gather_queue_health() -> QueueHealth:
     client = getattr(queue, "_redis", None)
     dead_letters = await count_dead_letters(client) if client else 0
     auto_triage_enabled = bool(settings.task_queue_auto_requeue_errors)
-    return QueueHealth(queue_name=getattr(queue, "queue_name", "unknown"), dead_letters=dead_letters, auto_triage_enabled=auto_triage_enabled)
+    return QueueHealth(
+        queue_name=getattr(queue, "queue_name", "unknown"),
+        dead_letters=dead_letters,
+        auto_triage_enabled=auto_triage_enabled,
+    )

agent_pm/observability/metrics.py

Lines changed: 0 additions & 1 deletion
@@ -7,7 +7,6 @@
 
 from prometheus_client import Counter, Gauge, Histogram, Summary, generate_latest
 
-
 dead_letter_recorded_total = Counter(
     "task_dead_letter_recorded_total",
     "Dead-letter entries recorded",

agent_pm/settings.py

Lines changed: 3 additions & 1 deletion
@@ -53,7 +53,9 @@ def _parse_csv_list(value: str | list[str] | None) -> list[str]:
             return [item.strip() for item in value.split(",") if item.strip()]
         return value
 
-    @field_validator("github_repositories", "slack_sync_channels", "gmail_label_filter", "notion_database_ids", mode="before")
+    @field_validator(
+        "github_repositories", "slack_sync_channels", "gmail_label_filter", "notion_database_ids", mode="before"
+    )
     @classmethod
     def _parse_string_lists(cls, value):
         return cls._parse_csv_list(value)

agent_pm/storage/syncs.py

Lines changed: 1 addition & 5 deletions
@@ -45,11 +45,7 @@ async def record_sync(
 async def list_recent_syncs(limit: int = 50) -> list[dict[str, Any]]:
     session_factory = get_session_factory()
     async with session_factory() as session:
-        stmt = (
-            select(ConnectorSync)
-            .order_by(desc(ConnectorSync.started_at))
-            .limit(max(limit, 1))
-        )
+        stmt = select(ConnectorSync).order_by(desc(ConnectorSync.started_at)).limit(max(limit, 1))
         try:
             result = await session.execute(stmt)
         except Exception:

agent_pm/storage/tasks.py

Lines changed: 42 additions & 18 deletions
@@ -13,6 +13,7 @@
 from enum import Enum
 from typing import Any
 
+from ..clients import slack_client
 from ..observability.metrics import (
     dead_letter_active_gauge,
     dead_letter_alert_total,
@@ -25,15 +26,12 @@
     record_task_latency,
 )
 from ..settings import settings
-from ..utils.datetime import utc_now
-from ..clients import pagerduty_client, slack_client
 from ..tasks.playbooks import run_playbook
+from ..utils.datetime import utc_now
 from .redis import (
-    append_dead_letter_audit,
     clear_dead_letter,
     count_dead_letters,
     delete_retry_policy,
-    enqueue_task as redis_enqueue_task,
     fetch_dead_letter_audit,
     fetch_dead_letters,
     get_dead_letter,
@@ -48,6 +46,9 @@
     set_task_result,
     write_heartbeat,
 )
+from .redis import (
+    enqueue_task as redis_enqueue_task,
+)
 
 logger = logging.getLogger(__name__)
 
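The import reshuffle above tracks ruff's isort-style rule I001 (unsorted-imports): within each section, modules sort alphabetically, which is why utc_now now imports after run_playbook; the dropped pagerduty_client and append_dead_letter_audit names were presumably unused (F401). A minimal sketch of the expected ordering (module paths taken from the diff; the section comments are illustrative):

    # standard library
    import logging
    from datetime import timedelta

    # local/first-party, sorted alphabetically by module path
    from ..clients import slack_client
    from ..settings import settings
    from ..tasks.playbooks import run_playbook
    from ..utils.datetime import utc_now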
@@ -332,12 +333,22 @@ async def _worker(self, worker_id: int):
         alert_channel = settings.task_queue_alert_channel or settings.slack_status_channel
         cooldown = timedelta(minutes=settings.task_queue_alert_cooldown_minutes)
 
-        def _should_auto_requeue(err_type: str | None) -> bool:
+        def _should_auto_requeue(
+            err_type: str | None,
+            *,
+            auto_errors: set[str] = auto_errors,
+        ) -> bool:
             if not err_type:
                 return False
             return err_type in auto_errors
 
-        def _record_failure(err_type: str | None, task_identifier: str) -> bool:
+        def _record_failure(
+            err_type: str | None,
+            task_identifier: str,
+            *,
+            alert_window: timedelta = alert_window,
+            alert_threshold: int = alert_threshold,
+        ) -> bool:
             if not err_type:
                 return False
             now = utc_now()
@@ -348,13 +359,17 @@ def _record_failure(err_type: str | None, task_identifier: str) -> bool:
             recent_failures[key] = [ts for ts in entries if ts >= cutoff]
             return len(recent_failures[key]) >= alert_threshold
 
-        async def _send_alert(error_type: str, payload: dict[str, Any]) -> None:
+        async def _send_alert(
+            error_type: str,
+            payload: dict[str, Any],
+            *,
+            alert_channel: str | None = alert_channel,
+            cooldown: timedelta = cooldown,
+        ) -> None:
             if not alert_channel:
                 return
             if settings.dry_run or not slack_client.enabled:
-                logger.warning(
-                    "Slack alert skipped (dry run): %s", error_type
-                )
+                logger.warning("Slack alert skipped (dry run): %s", error_type)
                 return
             now = utc_now()
             last_sent = last_alert_sent.get(error_type)
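Rewriting these inner functions to take auto_errors, alert_window, alert_threshold, alert_channel, and cooldown as keyword-only defaults looks like the standard fix for ruff's B023 (function-uses-loop-variable): a default is evaluated once, when the def statement runs, so each closure keeps the value it saw at definition time instead of re-reading the enclosing variable later. A minimal, self-contained sketch of the pitfall and the idiom (names are hypothetical):

    callbacks = []
    for tag in ["a", "b", "c"]:
        # late binding: `tag` is looked up when the lambda runs, so all three return "c"
        callbacks.append(lambda: tag)
        # keyword default: binds the current value of `tag` at definition time
        callbacks.append(lambda *, tag=tag: tag)

    print([cb() for cb in callbacks[0::2]])  # ['c', 'c', 'c']
    print([cb() for cb in callbacks[1::2]])  # ['a', 'b', 'c']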
@@ -373,19 +388,24 @@ async def _send_alert(error_type: str, payload: dict[str, Any]) -> None:
             except Exception as exc:  # pragma: no cover - logging
                 logger.error("Failed to send Slack alert: %s", exc)
 
-        async def _apply_adaptive_policy(task_name: str) -> None:
+        async def _apply_adaptive_policy(
+            task_name: str,
+            task_payload: dict[str, Any],
+        ) -> None:
             samples = failure_metrics.get(task_name, [])
             if len(samples) < settings.task_queue_adaptive_min_samples:
                 return
             failure_rate = 1 - (sum(1 for success in samples if success) / len(samples))
             if failure_rate < settings.task_queue_adaptive_failure_threshold:
                 return
             policy = await get_retry_policy(self._redis, task_name) or {}
-            policy.setdefault("max_retries", payload.get("max_retries", 3))
+            policy.setdefault("max_retries", task_payload.get("max_retries", 3))
             policy.setdefault("timeout", self._task_timeout)
             policy.setdefault("backoff_base", self._backoff_base)
             policy.setdefault("backoff_max", self._backoff_max)
-            policy["max_retries"] = min(int(policy["max_retries"]) + 1, settings.task_queue_max_auto_requeues)
+            policy["max_retries"] = min(
+                int(policy["max_retries"]) + 1, settings.task_queue_max_auto_requeues
+            )
             policy["timeout"] = float(policy.get("timeout", self._task_timeout)) + 5.0
             await set_retry_policy(self._redis, task_name, policy)
             failure_metrics[task_name] = []
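Beyond the wrapping, this hunk threads the task's payload in explicitly instead of closing over the worker loop's payload variable (the same late-binding concern as above), which also makes the escalation rule easier to read: once the sampled failure rate crosses the threshold, the stored policy gains one extra retry (capped at task_queue_max_auto_requeues) and five more seconds of timeout. A standalone sketch of that step, with illustrative defaults:

    def escalate(policy: dict, *, cap: int = 5, default_retries: int = 3, base_timeout: float = 30.0) -> dict:
        # seed missing fields, then bump: +1 retry up to the cap, +5s timeout
        policy.setdefault("max_retries", default_retries)
        policy.setdefault("timeout", base_timeout)
        policy["max_retries"] = min(int(policy["max_retries"]) + 1, cap)
        policy["timeout"] = float(policy["timeout"]) + 5.0
        return policy

    print(escalate({}))                  # {'max_retries': 4, 'timeout': 35.0}
    print(escalate({"max_retries": 5}))  # stays at the cap: {'max_retries': 5, 'timeout': 35.0}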
@@ -462,7 +482,9 @@ async def _apply_adaptive_policy(task_name: str) -> None:
                 ).inc()
                 record_task_completion(self.queue_name, TaskStatus.FAILED.value)
                 record_task_latency(self.queue_name, (utc_now() - start).total_seconds())
-                identifier = payload.get("metadata", {}).get("workflow_id") or payload.get("name", "unknown")
+                identifier = payload.get("metadata", {}).get("workflow_id") or payload.get(
+                    "name", "unknown"
+                )
                 playbook_name = settings.task_queue_playbooks.get(error_type)
                 if playbook_name:
                     await run_playbook(playbook_name, payload, self, error_type)
@@ -480,7 +502,7 @@ async def _apply_adaptive_policy(task_name: str) -> None:
                     payload = auto_payload
                     metrics = failure_metrics.setdefault(name, [])
                     metrics.append(False)
-                    await _apply_adaptive_policy(name)
+                    await _apply_adaptive_policy(name, payload)
                     if _record_failure(error_type, identifier):
                         await _send_alert(error_type, payload)
                     continue
@@ -492,13 +514,13 @@ async def _apply_adaptive_policy(task_name: str) -> None:
                 await asyncio.sleep(backoff)
                 metrics = failure_metrics.setdefault(name, [])
                 metrics.append(False)
-                await _apply_adaptive_policy(name)
+                await _apply_adaptive_policy(name, payload)
                 await redis_enqueue_task(self._redis, name, payload)
                 continue
 
             metrics = failure_metrics.setdefault(name, [])
             metrics.append(True)
-            await _apply_adaptive_policy(name)
+            await _apply_adaptive_policy(name, payload)
             await set_task_result(self._redis, task_id, {"status": "completed", "result": result})
             record_task_completion(self.queue_name, TaskStatus.COMPLETED.value)
             record_task_latency(self.queue_name, (utc_now() - start).total_seconds())
@@ -588,7 +610,9 @@ async def purge_dead_letters(self, *, older_than: timedelta | None = None) -> in
         if older_than is None:
             deleted = await purge_dead_letters(self._redis)
             dead_letter_purged_total.labels(queue=self.queue_name, mode="all").inc(deleted)
-            dead_letter_active_gauge.labels(queue=self.queue_name).set(await count_dead_letters(self._redis))
+            dead_letter_active_gauge.labels(queue=self.queue_name).set(
+                await count_dead_letters(self._redis)
+            )
             return deleted
         cutoff = utc_now() - older_than
         deleted = await purge_dead_letters(self._redis, older_than=cutoff)

agent_pm/tasks/playbooks.py

Lines changed: 4 additions & 2 deletions
@@ -3,7 +3,8 @@
 from __future__ import annotations
 
 import logging
-from typing import Any, Awaitable, Callable
+from collections.abc import Awaitable, Callable
+from typing import Any
 
 import httpx
 
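Moving Awaitable and Callable over to collections.abc matches ruff's UP035 (deprecated-import): the ABCs live in collections.abc, and the typing re-exports are deprecated aliases; the same change appears in agent_pm/tasks/sync.py below for Iterable. A small sketch of the post-fix style (the Handler alias and first helper are hypothetical):

    from collections.abc import Awaitable, Callable, Iterable
    from typing import Any

    # an async callback type, spelled with the collections.abc ABCs
    Handler = Callable[[dict[str, Any]], Awaitable[None]]

    def first(items: Iterable[str]) -> str | None:
        # works for any iterable; returns None when it is empty
        return next(iter(items), None)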
@@ -50,7 +51,8 @@ async def _invoke_webhook(payload: dict[str, Any], queue: Any, error_type: str)
 
 async def _log_only(payload: dict[str, Any], queue: Any, error_type: str) -> None:
     logger.warning(
-        "Remediation playbook invoked", extra={"queue": getattr(queue, "queue_name", "unknown"), "error": error_type, "task": payload.get("task_id")}
+        "Remediation playbook invoked",
+        extra={"queue": getattr(queue, "queue_name", "unknown"), "error": error_type, "task": payload.get("task_id")},
     )
 

agent_pm/tasks/sync.py

Lines changed: 4 additions & 2 deletions
@@ -5,9 +5,9 @@
 import asyncio
 import logging
 import time
+from collections.abc import Iterable
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
-from typing import Iterable
 
 from agent_pm.connectors import (
     CalendarConnector,
@@ -136,7 +136,9 @@ async def _record_sync(
             metadata={"started_at": started_at.isoformat()},
         )
     except Exception:  # pragma: no cover - defensive logging
-        logger.exception("Failed to persist connector sync record", extra={"connector": connector, "status": status})
+        logger.exception(
+            "Failed to persist connector sync record", extra={"connector": connector, "status": status}
+        )
 
 
 def create_default_sync_manager() -> PeriodicSyncManager:

app.py

Lines changed: 3 additions & 5 deletions
@@ -5,11 +5,10 @@
 import asyncio
 import logging
 import uuid
+from contextlib import asynccontextmanager
 from datetime import datetime, timedelta
 from typing import Any
 
-from contextlib import asynccontextmanager
-
 from fastapi import Depends, FastAPI, HTTPException, WebSocket, WebSocketDisconnect
 from fastapi.responses import PlainTextResponse
 from pydantic import BaseModel, model_validator
@@ -73,6 +72,7 @@
 from agent_pm.tasks.sync import PeriodicSyncManager, create_default_sync_manager
 from agent_pm.tools import registry
 
+
 @asynccontextmanager
 async def lifespan(_app: FastAPI):
     global _task_queue, _sync_manager
@@ -543,9 +543,7 @@ async def delete_dead_letter(task_id: str, _admin_key: AdminKeyDep = None) -> di
 
 
 @app.delete("/tasks/dead-letter")
-async def purge_dead_letters(
-    older_than_minutes: int | None = None, _admin_key: AdminKeyDep = None
-) -> dict[str, int]:
+async def purge_dead_letters(older_than_minutes: int | None = None, _admin_key: AdminKeyDep = None) -> dict[str, int]:
     task_queue = await get_task_queue()
     if older_than_minutes is None:
         deleted = await task_queue.purge_dead_letters()
