Commit 3af6166
ref(feedback): add save_event_feedback metric and cleanup other ingest metrics (#97741)

- Adds a timer metric for the task that feedback v2 ingest runs in: `save_event_feedback`. (The v1 equivalent is `ingest_consumer.process_userreport`.) See the sketch below.
- Removes all `sample_rate=1.0` arguments. Digging into the metrics code shows the sample rate is only used for internal metrics, which we don't use anymore.
- Cleans up some unused metrics (see the inline comments in the diff).
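For readers unfamiliar with `metrics.wraps`: it decorates a function and emits a timer for each call. The sketch below is a minimal re-implementation to illustrate the shape, not Sentry's actual code; `emit_timer` and its print output are stand-ins for a real metrics backend.

import functools
import time


def emit_timer(key: str, duration_ms: float, tags: dict | None = None) -> None:
    # Stand-in for a real metrics backend call (e.g. a statsd timing).
    print(f"timer {key}={duration_ms:.2f}ms tags={tags}")


def wraps(key: str, tags: dict | None = None):
    # Times every call to the decorated function, success or failure,
    # mirroring what a wraps-style timer decorator provides.
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            start = time.monotonic()
            try:
                return func(*args, **kwargs)
            finally:
                emit_timer(key, (time.monotonic() - start) * 1000, tags)

        return inner

    return decorator


@wraps("feedback_consumer.save_event_feedback_task")
def save_event_feedback(event_data: dict) -> None:
    ...  # placeholder for the real ingest work


save_event_feedback({"event_id": "abc123"})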

File tree: 6 files changed, +16, -34 lines

src/sentry/feedback/usecases/ingest/create_feedback.py
Lines changed: 10 additions & 19 deletions
@@ -98,17 +98,19 @@ def fix_for_issue_platform(event_data: dict[str, Any]) -> dict[str, Any]:
 
     ret_event["contexts"] = event_data.get("contexts", {})
 
-    # TODO: remove this once feedback_ingest API deprecated
-    # as replay context will be filled in
+    # TODO: investigate if this can be removed. If the frontend looks in both
+    # feedback and replay context for replay_id, this might not be needed.
     if not event_data["contexts"].get("replay") and event_data["contexts"].get("feedback", {}).get(
         "replay_id"
     ):
-        # Temporary metric to confirm this behavior is no longer needed.
-        metrics.incr("feedback.create_feedback_issue.filled_missing_replay_context")
-
+        # This metric confirms this block is still entered.
+        metrics.incr(
+            "feedback.create_feedback_issue.filled_missing_replay_context",
+        )
         ret_event["contexts"]["replay"] = {
             "replay_id": event_data["contexts"].get("feedback", {}).get("replay_id")
         }
+
     ret_event["event_id"] = event_data["event_id"]
 
     ret_event["platform"] = event_data.get("platform", "other")
@@ -162,11 +164,7 @@ def validate_issue_platform_event_schema(event_data):
     The issue platform schema validation does not run in dev atm so we have to do the validation
     ourselves, or else our tests are not representative of what happens in prod.
     """
-    try:
-        jsonschema.validate(event_data, EVENT_PAYLOAD_SCHEMA)
-    except jsonschema.exceptions.ValidationError:
-        metrics.incr("feedback.create_feedback_issue.invalid_schema")
-        raise
+    jsonschema.validate(event_data, EVENT_PAYLOAD_SCHEMA)
 
 
 def should_filter_feedback(event: dict) -> tuple[bool, str | None]:
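The removed try/except changes nothing about error propagation: `jsonschema.validate` raises `ValidationError` either way; only the `invalid_schema` counter is gone. A minimal sketch, using a stand-in schema rather than the real `EVENT_PAYLOAD_SCHEMA`:

import jsonschema

# Stand-in schema; the real EVENT_PAYLOAD_SCHEMA is much larger.
SCHEMA = {"type": "object", "required": ["event_id"]}

try:
    # Raises jsonschema.exceptions.ValidationError on bad input, exactly
    # as the old version did after incrementing the counter and re-raising.
    jsonschema.validate({}, SCHEMA)  # missing event_id
except jsonschema.exceptions.ValidationError as err:
    print(f"rejected: {err.message}")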
@@ -214,13 +212,6 @@ def create_feedback_issue(
     Returns the formatted event data that was sent to issue platform.
     """
 
-    metrics.incr(
-        "feedback.create_feedback_issue.entered",
-        tags={
-            "referrer": source.value,
-        },
-    )
-
     should_filter, filter_reason = should_filter_feedback(event)
     if should_filter:
         if filter_reason == "too_large":
@@ -279,13 +270,14 @@
         except Exception:
             # until we have LLM error types ironed out, just catch all exceptions
             logger.exception("Error checking if message is spam", extra={"project_id": project.id})
+
+    # In DD we use is_spam = None to indicate spam failed.
     metrics.incr(
         "feedback.create_feedback_issue.spam_detection",
         tags={
             "is_spam": is_message_spam,
             "referrer": source.value,
         },
-        sample_rate=1.0,
     )
 
     # Prepare the data for issue platform processing and attach useful tags.
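On the `is_spam = None` convention noted in the comment above: the variable presumably starts as None and is only assigned when the spam check returns, so a None tag value in Datadog marks runs where detection raised. A hedged sketch of the pattern (the helper and its names are illustrative, not Sentry's code):

from collections.abc import Callable


def classify(message: str, spam_check: Callable[[str], bool]) -> bool | None:
    # None means "spam detection failed", not "not spam".
    is_message_spam: bool | None = None
    try:
        is_message_spam = spam_check(message)
    except Exception:
        pass  # the failure shows up downstream as an is_spam=None tag
    return is_message_spam


# A counter tagged with the result then splits spam / ham / failed runs:
result = classify("buy cheap meds", lambda m: "cheap" in m)
print({"metric": "spam_detection", "tags": {"is_spam": result}})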
@@ -411,7 +403,6 @@
             "referrer": source.value,
             "platform": project.platform,
         },
-        sample_rate=1.0,
     )
 
     track_outcome(

src/sentry/feedback/usecases/ingest/save_event_feedback.py
Lines changed: 2 additions & 2 deletions

@@ -15,8 +15,8 @@
 
 def save_event_feedback(event_data: Mapping[str, Any], project_id: int):
     """Saves feedback given data in an event format. This function should only
-    be called by the new feedback consumer's ingest strategy, to process
-    feedback envelopes (feedback v2). It is currently instrumented as a task in
+    be called by the feedback consumer's ingest strategy, to process
+    event envelopes (feedback v2). It is currently called in a task in
     sentry.tasks.store.
 
     If the save is successful and the `associated_event_id` field is present,

src/sentry/feedback/usecases/label_generation.py
Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ class LabelRequest(TypedDict):
 SEER_GENERATE_LABELS_URL = f"{settings.SEER_AUTOFIX_URL}/v1/automation/summarize/feedback/labels"
 
 
-@metrics.wraps("feedback.generate_labels", sample_rate=1.0)
+@metrics.wraps("feedback.generate_labels")
 def generate_labels(feedback_message: str, organization_id: int) -> list[str]:
     """
     Generate labels for a feedback message.

src/sentry/feedback/usecases/spam_detection.py
Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ def make_input_prompt(message: str):
 **Classify:** """
 
 
-@metrics.wraps("feedback.spam_detection", sample_rate=1.0)
+@metrics.wraps("feedback.spam_detection")
 def is_spam(message: str):
     labeled_spam = False
     response = complete_prompt(

src/sentry/feedback/usecases/title_generation.py
Lines changed: 1 addition & 11 deletions

@@ -26,17 +26,9 @@ class GenerateFeedbackTitleRequest(TypedDict):
 def should_get_ai_title(organization: Organization) -> bool:
     """Check if AI title generation should be used for the given organization."""
     if not features.has("organizations:gen-ai-features", organization):
-        metrics.incr(
-            "feedback.ai_title_generation.skipped",
-            tags={"reason": "gen_ai_disabled"},
-        )
         return False
 
     if not features.has("organizations:user-feedback-ai-titles", organization):
-        metrics.incr(
-            "feedback.ai_title_generation.skipped",
-            tags={"reason": "feedback_ai_titles_disabled"},
-        )
         return False
 
     return True

@@ -75,6 +67,7 @@ def format_feedback_title(title: str, max_words: int = 10) -> str:
     return title
 
 
+@metrics.wraps("feedback.ai_title_generation")
 def get_feedback_title_from_seer(feedback_message: str, organization_id: int) -> str | None:
     """
     Generate an AI-powered title for user feedback using Seer, or None if generation fails.

@@ -119,9 +112,6 @@ def get_feedback_title_from_seer(feedback_message: str, organization_id: int) -> str | None:
         )
         return None
 
-    metrics.incr(
-        "feedback.ai_title_generation.success",
-    )
     return title
 
 

src/sentry/tasks/store.py
Lines changed: 1 addition & 0 deletions

@@ -717,6 +717,7 @@ def save_event_transaction(
         processing_deadline_duration=65,
     ),
 )
+@metrics.wraps("feedback_consumer.save_event_feedback_task")
 def save_event_feedback(
     cache_key: str | None = None,
     start_time: float | None = None,
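Decorator order matters here: `@metrics.wraps` sits closest to the function, so it times the task body each time it executes rather than the enqueueing. A minimal sketch of that ordering, with stand-ins for both the task-registration decorator and the timer:

import functools
import time


def instrumented_task(func):
    # Stand-in for the real task-registration decorator in sentry.tasks.store.
    @functools.wraps(func)
    def runner(*args, **kwargs):
        return func(*args, **kwargs)

    return runner


def timed(key: str):
    # Stand-in for metrics.wraps: reports how long the wrapped call took.
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            start = time.monotonic()
            try:
                return func(*args, **kwargs)
            finally:
                print(f"{key}: {(time.monotonic() - start) * 1000:.1f}ms")

        return inner

    return decorator


@instrumented_task  # outermost: what the task runner invokes
@timed("feedback_consumer.save_event_feedback_task")  # innermost: times the body
def save_event_feedback(cache_key: str | None = None) -> None:
    time.sleep(0.01)  # simulate the save work


save_event_feedback()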
