Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 3 additions & 8 deletions src/sentry/incidents/grouptype.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,12 @@
from sentry.issues.grouptype import GroupCategory, GroupType
from sentry.ratelimits.sliding_windows import Quota
from sentry.types.group import PriorityLevel
from sentry.workflow_engine.models import DataPacket
from sentry.workflow_engine.processors.detector import DetectorEvaluationResult, DetectorHandler
from sentry.workflow_engine.processors.detector import StatefulDetectorHandler


# TODO: This will be a stateful detector when we build that abstraction
class MetricAlertDetectorHandler(DetectorHandler[QuerySubscriptionUpdate]):
def evaluate(
self, data_packet: DataPacket[QuerySubscriptionUpdate]
) -> list[DetectorEvaluationResult]:
# TODO: Implement
return []
class MetricAlertDetectorHandler(StatefulDetectorHandler[QuerySubscriptionUpdate]):
pass


# Example GroupType and detector handler for metric alerts. We don't create these issues yet, but we'll use something
Expand Down
8 changes: 7 additions & 1 deletion src/sentry/workflow_engine/models/detector.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import builtins
import logging
from typing import TYPE_CHECKING

Expand All @@ -9,6 +10,7 @@
from sentry.backup.scopes import RelocationScope
from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model
from sentry.issues import grouptype
from sentry.issues.grouptype import GroupType
from sentry.models.owner_base import OwnerModel

if TYPE_CHECKING:
Expand Down Expand Up @@ -53,9 +55,13 @@ def project_id(self):
# XXX: Temporary property until we add `project_id` to the model.
return 1

@property
def group_type(self) -> builtins.type[GroupType] | None:
return grouptype.registry.get_by_slug(self.type)

@property
def detector_handler(self) -> DetectorHandler | None:
group_type = grouptype.registry.get_by_slug(self.type)
group_type = self.group_type
if not group_type:
logger.error(
"No registered grouptype for detector",
Expand Down
45 changes: 22 additions & 23 deletions src/sentry/workflow_engine/processors/detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka
from sentry.issues.status_change_message import StatusChangeMessage
from sentry.models.group import GroupStatus
from sentry.types.group import PriorityLevel
from sentry.utils import metrics, redis
from sentry.utils.function_cache import cache_func_for_models
from sentry.utils.iterators import chunked
Expand Down Expand Up @@ -45,7 +46,7 @@ class DetectorEvaluationResult:

def process_detectors(
data_packet: DataPacket, detectors: list[Detector]
) -> list[tuple[Detector, list[DetectorEvaluationResult]]]:
) -> list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]]:
results = []

for detector in detectors:
Expand All @@ -55,25 +56,11 @@ def process_detectors(
continue

detector_results = handler.evaluate(data_packet)
detector_group_keys = set()

for result in detector_results:
if result.group_key in detector_group_keys:
# This shouldn't happen - log an error and continue on, but we should investigate this.
logger.error(
"Duplicate detector state group keys found",
extra={
"detector_id": detector.id,
"group_key": result.group_key,
},
)
continue

for result in detector_results.values():
if result.result is not None:
create_issue_occurrence_from_result(result)

detector_group_keys.add(result.group_key)

if detector_results:
results.append((detector, detector_results))

Expand Down Expand Up @@ -136,7 +123,9 @@ def __init__(self, detector: Detector):
self.conditions = []

@abc.abstractmethod
def evaluate(self, data_packet: DataPacket[T]) -> list[DetectorEvaluationResult]:
def evaluate(
self, data_packet: DataPacket[T]
) -> dict[DetectorGroupKey, DetectorEvaluationResult]:
pass

def commit_state_updates(self):
Expand Down Expand Up @@ -174,6 +163,12 @@ def get_group_key_values(self, data_packet: DataPacket[T]) -> dict[str, int]:
"""
pass

@abc.abstractmethod
def build_occurrence_and_event_data(
self, group_key: DetectorGroupKey, value: int, new_status: PriorityLevel
) -> tuple[IssueOccurrence, dict[str, Any]]:
pass

Comment on lines +166 to +171
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

New build_occurrence_and_event_data hook and PriorityLevel cast — verify enum alignment and subclasses

The new build_occurrence_and_event_data abstract method plus the updated evaluate/evaluate_group_key_value flow give a clear extension point for detectors to construct IssueOccurrence + event payloads, which is a nice separation of concerns.

Two follow-ups are worth tightening:

  1. Enum cast correctness
result, event_data = self.build_occurrence_and_event_data(
    group_key, value, PriorityLevel(new_status)
)

Here new_status is a DetectorPriorityLevel. This relies on the underlying integer values lining up with PriorityLevel so the cast is valid. If they ever diverge, you’ll get ValueError or incorrect priorities on the occurrences.

It might be safer long term to provide an explicit converter (e.g. a helper that maps DetectorPriorityLevel → PriorityLevel) rather than relying on raw IntEnum value equality.

  1. Subclass completeness

Adding build_occurrence_and_event_data as an abstract method means every concrete StatefulDetectorHandler subclass must now implement it; otherwise those classes remain abstract and can’t be instantiated. Tests cover MockDetectorStateHandler, but it’s worth checking for any production subclasses that might now be missing an implementation.

You can scan for subclasses to verify they all override this hook:

Also applies to: 226-245, 285-308


🏁 Script executed:

#!/bin/bash
# Find classes that subclass StatefulDetectorHandler and show their definitions
ast-grep --pattern $'class $C(StatefulDetectorHandler[$T]) {\n  $$$\n}' -n -C3

Length of output: 177


I'll correct the script and verify the concerns from the review comment.


---

🏁 Script executed:

```shell
#!/bin/bash
# Find classes that subclass StatefulDetectorHandler (corrected ast-grep syntax)
ast-grep --pattern 'class $C(StatefulDetectorHandler[$T]) { $$$ }'

Length of output: 0


🏁 Script executed:

#!/bin/bash
# Find all classes that inherit from StatefulDetectorHandler using rg
rg --type-add 'py:*.py' -n 'class\s+\w+.*StatefulDetectorHandler' -A 2

Length of output: 490


🏁 Script executed:

#!/bin/bash
# Search for DetectorPriorityLevel and PriorityLevel definitions and usage
rg --type-add 'py:*.py' -n 'DetectorPriorityLevel|PriorityLevel' src/sentry/workflow_engine/ -B 2 -A 2

Length of output: 10376


🏁 Script executed:

#!/bin/bash
# Examine the detector.py file around specified lines
cat src/sentry/workflow_engine/processors/detector.py | head -320 | tail -160

Length of output: 6948


🏁 Script executed:

#!/bin/bash
# Search for build_occurrence_and_event_data implementations
rg --type-add 'py:*.py' -n 'build_occurrence_and_event_data' -B 2 -A 5

Length of output: 2241


I need to check the full implementation of MetricAlertDetectorHandler and verify the PriorityLevel enum values.


🏁 Script executed:

#!/bin/bash
# Get full MetricAlertDetectorHandler class definition
cat src/sentry/incidents/grouptype.py

Length of output: 1073


🏁 Script executed:

#!/bin/bash
# Find PriorityLevel enum definition
rg --type-add 'py:*.py' -n 'class PriorityLevel' -A 10

Length of output: 2316


🏁 Script executed:

#!/bin/bash
# Check if MetricAlertDetectorHandler inherits any abstract methods or if it's instantiated anywhere
rg --type-add 'py:*.py' -n 'MetricAlertDetectorHandler' -B 2 -A 5

Length of output: 865


Enum cast will fail on DetectorPriorityLevel.OK; MetricAlertDetectorHandler missing required implementation

The PriorityLevel(new_status) cast at line 300 has a real alignment problem. PriorityLevel enum only defines LOW (25), MEDIUM (50), and HIGH (75), but DetectorPriorityLevel.OK has value 0. When new_status == DetectorPriorityLevel.OK, the cast will raise ValueError.

Additionally, MetricAlertDetectorHandler in src/sentry/incidents/grouptype.py:11 is a concrete subclass but only contains pass—it never implements the required abstract method build_occurrence_and_event_data. Although marked with a TODO comment suggesting future work, it's currently referenced as detector_handler in MetricAlertFire and cannot be instantiated.

Recommended fixes:

  1. Add an explicit converter from DetectorPriorityLevel → PriorityLevel that handles OK (e.g., map it to a safe default or raise a clearer error).
  2. Either implement build_occurrence_and_event_data in MetricAlertDetectorHandler or make it explicitly abstract (or stub it with NotImplementedError).
🤖 Prompt for AI Agents
In src/sentry/workflow_engine/processors/detector.py around lines 166–171 (and
where PriorityLevel(new_status) is used around line 300), stop directly casting
DetectorPriorityLevel into PriorityLevel; add a small converter function that
maps DetectorPriorityLevel.OK (0) to a safe PriorityLevel (e.g.,
PriorityLevel.LOW) or returns None/raises a clear ValueError with context, and
use that converter instead of direct enum construction to avoid ValueError on
OK. In src/sentry/incidents/grouptype.py (MetricAlertDetectorHandler) implement
the required abstract method build_occurrence_and_event_data (or explicitly make
the class abstract / raise NotImplementedError) so MetricAlertDetectorHandler
cannot be instantiated without a concrete implementation; if you choose a stub,
raise NotImplementedError with a clear message referencing required behavior.

def build_fingerprint(self, group_key) -> list[str]:
"""
Builds a fingerprint to uniquely identify a detected issue
Expand Down Expand Up @@ -228,7 +223,9 @@ def get_state_data(
)
return results

def evaluate(self, data_packet: DataPacket[T]) -> list[DetectorEvaluationResult]:
def evaluate(
self, data_packet: DataPacket[T]
) -> dict[DetectorGroupKey, DetectorEvaluationResult]:
"""
Evaluates a given data packet and returns a list of `DetectorEvaluationResult`.
There will be one result for each group key result in the packet, unless the
Expand All @@ -237,13 +234,13 @@ def evaluate(self, data_packet: DataPacket[T]) -> list[DetectorEvaluationResult]
dedupe_value = self.get_dedupe_value(data_packet)
group_values = self.get_group_key_values(data_packet)
all_state_data = self.get_state_data(list(group_values.keys()))
results = []
results = {}
for group_key, group_value in group_values.items():
result = self.evaluate_group_key_value(
group_key, group_value, all_state_data[group_key], dedupe_value
)
if result:
results.append(result)
results[result.group_key] = result
return results

def evaluate_group_key_value(
Expand Down Expand Up @@ -289,7 +286,7 @@ def evaluate_group_key_value(
is_active = new_status != DetectorPriorityLevel.OK
self.enqueue_state_update(group_key, is_active, new_status)
event_data = None
result = None
result: StatusChangeMessage | IssueOccurrence
if new_status == DetectorPriorityLevel.OK:
# If we've determined that we're now ok, we just want to resolve the issue
result = StatusChangeMessage(
Expand All @@ -298,8 +295,10 @@ def evaluate_group_key_value(
new_status=GroupStatus.RESOLVED,
new_substatus=None,
)

# TODO: Add hook here for generating occurrence
else:
result, event_data = self.build_occurrence_and_event_data(
group_key, value, PriorityLevel(new_status)
)
return DetectorEvaluationResult(
group_key=group_key,
is_active=is_active,
Expand Down
Loading