diff --git a/devservices/config.yml b/devservices/config.yml
index 51f67a698ec..2a448bc5502 100644
--- a/devservices/config.yml
+++ b/devservices/config.yml
@@ -20,6 +20,13 @@ x-sentry-service-config:
branch: master
repo_link: https://github.com/getsentry/snuba.git
mode: containerized-profiles
+ snuba-metrics:
+ description: Service that provides fast aggregation and query capabilities on top of Clickhouse, including metrics consumers
+ remote:
+ repo_name: snuba
+ branch: master
+ repo_link: https://github.com/getsentry/snuba.git
+ mode: containerized-metrics-dev
relay:
description: Service event forwarding and ingestion service
remote:
@@ -121,6 +128,15 @@ x-sentry-service-config:
description: Post-process forwarder for transaction events
post-process-forwarder-issue-platform:
description: Post-process forwarder for issue platform events
+ # Subscription results consumers
+ eap-spans-subscription-results:
+ description: Kafka consumer for processing subscription results for spans
+ subscription-results-eap-items:
+ description: Kafka consumer for processing subscription results for eap items
+ metrics-subscription-results:
+ description: Kafka consumer for processing subscription results for metrics
+ generic-metrics-subscription-results:
+ description: Kafka consumer for processing subscription results for generic metrics
# Uptime monitoring
uptime-results:
description: Kafka consumer for uptime monitoring results
@@ -138,6 +154,29 @@ x-sentry-service-config:
rabbitmq: [postgres, snuba, rabbitmq, spotlight]
symbolicator: [postgres, snuba, symbolicator, spotlight]
memcached: [postgres, snuba, memcached, spotlight]
+ tracing:
+ [
+ postgres,
+ snuba-metrics,
+ relay,
+ spotlight,
+ ingest-events,
+ ingest-transactions,
+ ingest-metrics,
+ ingest-generic-metrics,
+ billing-metrics-consumer,
+ post-process-forwarder-errors,
+ post-process-forwarder-transactions,
+ post-process-forwarder-issue-platform,
+ eap-spans-subscription-results,
+ subscription-results-eap-items,
+ metrics-subscription-results,
+ generic-metrics-subscription-results,
+ process-spans,
+ ingest-occurrences,
+ process-segments,
+ worker,
+ ]
crons:
[
postgres,
@@ -283,6 +322,14 @@ x-programs:
command: sentry run consumer post-process-forwarder-issue-platform --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
ingest-feedback-events:
command: sentry run consumer ingest-feedback-events --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
+ eap-spans-subscription-results:
+ command: sentry run consumer eap-spans-subscription-results --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
+ subscription-results-eap-items:
+ command: sentry run consumer subscription-results-eap-items --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
+ metrics-subscription-results:
+ command: sentry run consumer metrics-subscription-results --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
+ generic-metrics-subscription-results:
+ command: sentry run consumer generic-metrics-subscription-results --consumer-group=sentry-consumer --auto-offset-reset=latest --no-strict-offset-reset
worker:
command: sentry run worker -c 1 --autoreload
diff --git a/src/sentry/constants.py b/src/sentry/constants.py
index c5374e1dcb0..d2ef563c826 100644
--- a/src/sentry/constants.py
+++ b/src/sentry/constants.py
@@ -721,7 +721,7 @@ class InsightModules(Enum):
TARGET_SAMPLE_RATE_DEFAULT = 1.0
SAMPLING_MODE_DEFAULT = "organization"
ROLLBACK_ENABLED_DEFAULT = True
-DEFAULT_AUTOFIX_AUTOMATION_TUNING_DEFAULT = "low"
+DEFAULT_AUTOFIX_AUTOMATION_TUNING_DEFAULT = "off"
DEFAULT_SEER_SCANNER_AUTOMATION_DEFAULT = False
INGEST_THROUGH_TRUSTED_RELAYS_ONLY_DEFAULT = False
diff --git a/src/sentry/feedback/usecases/feedback_summaries.py b/src/sentry/feedback/usecases/feedback_summaries.py
index 27000dd16ee..7da2167d11e 100644
--- a/src/sentry/feedback/usecases/feedback_summaries.py
+++ b/src/sentry/feedback/usecases/feedback_summaries.py
@@ -10,12 +10,14 @@
def make_input_prompt(
feedbacks,
):
- feedbacks_string = "\n".join(f"- {msg}" for msg in feedbacks)
+ feedbacks_string = "\n------\n".join(feedbacks)
return f"""Instructions:
-You are an assistant that summarizes customer feedback. Given a list of customer feedback entries, generate a concise summary of 1-2 sentences that reflects the key themes. Begin the summary with "Users...", for example, "Users say...".
+You are an assistant that summarizes customer feedback. Given a list of customer feedback entries, generate a concise summary of 1-2 sentences that reflects the key themes. Begin the summary with "Users...", for example, "Users say...". Don't make overly generic statements like "Users report a variety of issues."
-Balance specificity and generalization based on the size of the input based *only* on the themes and topics present in the list of customer feedback entries. Prioritize brevity and clarity and trying to capture what users are saying, over trying to mention random specific topics. Please don't write overly long sentences, you can leave certain things out and the decision to mention specific topics or themes should be proportional to the number of times they appear in the user feedback entries.
+Balance specificity and generalization based on the size of the input, using only the themes and topics present in the list of customer feedback entries. Your goal is to identify and summarize the broader themes that recur across different feedback entries. For example, with many feedback entries it makes more sense to mention broad themes that apply to many of them than to call out one or two isolated concerns while leaving out others that are just as prevalent.
+
+The summary must be AT MOST 55 words (an absolute upper limit) and AT MOST two sentences. You can leave things out; when deciding which topics/themes to mention, keep coverage proportional to the number of times they appear in different customer feedback entries.
User Feedbacks:
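
Note: the switch from bullet-prefixed lines to a `------` divider keeps multi-line feedback entries visually separate in the prompt. A minimal sketch of the new join (hypothetical entries):

```python
feedbacks = [
    "Login fails when using SSO",
    "Dashboard takes ~10s to load\non slow connections",
]
# A divider line separates entries, so a newline embedded inside a
# single feedback no longer looks like a separate entry.
feedbacks_string = "\n------\n".join(feedbacks)
print(feedbacks_string)
# Login fails when using SSO
# ------
# Dashboard takes ~10s to load
# on slow connections
```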
diff --git a/src/sentry/grouping/parameterization.py b/src/sentry/grouping/parameterization.py
index bfcef71f5d2..c06a719e833 100644
--- a/src/sentry/grouping/parameterization.py
+++ b/src/sentry/grouping/parameterization.py
@@ -11,7 +11,6 @@
"ParameterizationCallableExperiment",
"ParameterizationExperiment",
"ParameterizationRegex",
- "ParameterizationRegexExperiment",
"Parameterizer",
"UniqueIdExperiment",
]
@@ -206,15 +205,6 @@ def run(self, content: str, callback: Callable[[str, int], None]) -> str:
return content
-class ParameterizationRegexExperiment(ParameterizationRegex):
- def run(
- self,
- content: str,
- callback: Callable[[re.Match[str]], str],
- ) -> str:
- return self.compiled_pattern.sub(callback, content)
-
-
class _UniqueId:
# just a namespace for the uniq_id logic, no need to instantiate
@@ -275,7 +265,7 @@ def replace_uniq_ids_in_str(string: str) -> tuple[str, int]:
)
-ParameterizationExperiment = ParameterizationCallableExperiment | ParameterizationRegexExperiment
+ParameterizationExperiment = ParameterizationCallableExperiment
class Parameterizer:
@@ -355,10 +345,8 @@ def _handle_regex_match(match: re.Match[str]) -> str:
for experiment in self._experiments:
if not should_run(experiment.name):
continue
- if isinstance(experiment, ParameterizationCallableExperiment):
- content = experiment.run(content, _incr_counter)
- else:
- content = experiment.run(content, _handle_regex_match)
+
+ content = experiment.run(content, _incr_counter)
return content
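
Note: with `ParameterizationRegexExperiment` gone, every experiment goes through the callable interface with the counter callback. A hedged sketch of that contract (a simplified stand-in, not the real `ParameterizationCallableExperiment` class):

```python
from collections.abc import Callable

class CallableExperimentSketch:
    """Stand-in: a named experiment whose `run` rewrites `content`
    and reports replacement counts via `callback`."""

    def __init__(self, name: str, fn: Callable[[str, Callable[[str, int], None]], str]):
        self.name = name
        self._fn = fn

    def run(self, content: str, callback: Callable[[str, int], None]) -> str:
        return self._fn(content, callback)

counters: dict[str, int] = {}

def _incr_counter(key: str, count: int) -> None:
    counters[key] = counters.get(key, 0) + count

def _replace_uniq_id(content: str, callback: Callable[[str, int], None]) -> str:
    # Toy replacement standing in for the real uniq_id logic.
    if "deadbeef" in content:
        callback("uniq_id", 1)
        return content.replace("deadbeef", "<uniq_id>")
    return content

exp = CallableExperimentSketch("uniq_id", _replace_uniq_id)
print(exp.run("session deadbeef expired", _incr_counter))  # session <uniq_id> expired
print(counters)  # {'uniq_id': 1}
```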
diff --git a/src/sentry/hybridcloud/tasks/deliver_webhooks.py b/src/sentry/hybridcloud/tasks/deliver_webhooks.py
index d87dec54a87..bf8c7c51e26 100644
--- a/src/sentry/hybridcloud/tasks/deliver_webhooks.py
+++ b/src/sentry/hybridcloud/tasks/deliver_webhooks.py
@@ -82,6 +82,7 @@ class DeliveryFailed(Exception):
silo_mode=SiloMode.CONTROL,
taskworker_config=TaskworkerConfig(
namespace=hybridcloud_control_tasks,
+ processing_deadline_duration=30,
),
)
def schedule_webhook_delivery() -> None:
@@ -157,6 +158,7 @@ def schedule_webhook_delivery() -> None:
silo_mode=SiloMode.CONTROL,
taskworker_config=TaskworkerConfig(
namespace=hybridcloud_control_tasks,
+ processing_deadline_duration=300,
),
)
def drain_mailbox(payload_id: int) -> None:
@@ -234,6 +236,7 @@ def drain_mailbox(payload_id: int) -> None:
silo_mode=SiloMode.CONTROL,
taskworker_config=TaskworkerConfig(
namespace=hybridcloud_control_tasks,
+ processing_deadline_duration=120,
),
)
def drain_mailbox_parallel(payload_id: int) -> None:
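
Note: `processing_deadline_duration` bounds how long a taskworker may hold one of these tasks. A hedged sketch of the registration pattern, reusing the names from this diff (the task name is hypothetical, and the duration unit is assumed to be seconds):

```python
from sentry.silo.base import SiloMode
from sentry.tasks.base import instrumented_task
from sentry.taskworker.config import TaskworkerConfig
from sentry.taskworker.namespaces import hybridcloud_control_tasks

@instrumented_task(
    name="sentry.hybridcloud.tasks.deliver_webhooks.example_task",  # hypothetical name
    silo_mode=SiloMode.CONTROL,
    taskworker_config=TaskworkerConfig(
        namespace=hybridcloud_control_tasks,
        # Deadline scales with expected work: 30 for scheduling,
        # 300 for serial drains, 120 for parallel drains.
        processing_deadline_duration=120,
    ),
)
def example_task(payload_id: int) -> None:
    ...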
diff --git a/src/sentry/integrations/github/integration.py b/src/sentry/integrations/github/integration.py
index dc0a4382c0b..3506ca32848 100644
--- a/src/sentry/integrations/github/integration.py
+++ b/src/sentry/integrations/github/integration.py
@@ -384,8 +384,6 @@ def get_open_pr_comment_workflow(self) -> OpenPRCommentWorkflow:
Did you find this useful? React with a 👍 or 👎"""
-MERGED_PR_SINGLE_ISSUE_TEMPLATE = "- ‼️ **{title}** `{subtitle}` [View Issue]({url})"
-
class GitHubPRCommentWorkflow(PRCommentWorkflow):
organization_option_key = "sentry:github_pr_bot"
@@ -405,10 +403,10 @@ def get_comment_body(self, issue_ids: list[int]) -> str:
issue_list = "\n".join(
[
- MERGED_PR_SINGLE_ISSUE_TEMPLATE.format(
+ self.get_merged_pr_single_issue_template(
title=issue.title,
- subtitle=self.format_comment_subtitle(issue.culprit or "unknown culprit"),
url=self.format_comment_url(issue.get_absolute_url(), self.referrer_id),
+ environment=self.get_environment_info(issue),
)
for issue in issues
]
diff --git a/src/sentry/integrations/gitlab/integration.py b/src/sentry/integrations/gitlab/integration.py
index c7afbd5f57f..0244c9a811d 100644
--- a/src/sentry/integrations/gitlab/integration.py
+++ b/src/sentry/integrations/gitlab/integration.py
@@ -230,8 +230,6 @@ def get_open_pr_comment_workflow(self) -> OpenPRCommentWorkflow:
{issue_list}"""
-MERGED_PR_SINGLE_ISSUE_TEMPLATE = "- ‼️ **{title}** `{subtitle}` [View Issue]({url})"
-
class GitlabPRCommentWorkflow(PRCommentWorkflow):
organization_option_key = "sentry:gitlab_pr_bot"
@@ -253,10 +251,10 @@ def get_comment_body(self, issue_ids: list[int]) -> str:
issue_list = "\n".join(
[
- MERGED_PR_SINGLE_ISSUE_TEMPLATE.format(
+ self.get_merged_pr_single_issue_template(
title=issue.title,
- subtitle=self.format_comment_subtitle(issue.culprit),
url=self.format_comment_url(issue.get_absolute_url(), self.referrer_id),
+ environment=self.get_environment_info(issue),
)
for issue in issues
]
diff --git a/src/sentry/integrations/source_code_management/commit_context.py b/src/sentry/integrations/source_code_management/commit_context.py
index ce5483df02e..8492b931ec0 100644
--- a/src/sentry/integrations/source_code_management/commit_context.py
+++ b/src/sentry/integrations/source_code_management/commit_context.py
@@ -139,6 +139,10 @@ class PullRequestFile:
patch: str
+ISSUE_TITLE_MAX_LENGTH = 50
+MERGED_PR_SINGLE_ISSUE_TEMPLATE = "* ‼️ [**{title}**]({url}){environment}\n"
+
+
class CommitContextIntegration(ABC):
"""
Base class for integrations that include commit context features: suspect commits, suspect PR comments
@@ -570,6 +574,37 @@ def get_top_5_issues_by_count(
)
return raw_snql_query(request, referrer=self.referrer.value)["data"]
+ @staticmethod
+ def _truncate_title(title: str, max_length: int = ISSUE_TITLE_MAX_LENGTH) -> str:
+ """Truncate title if it's too long and add ellipsis."""
+ if len(title) <= max_length:
+ return title
+ return title[:max_length].rstrip() + "..."
+
+ def get_environment_info(self, issue: Group) -> str:
+ try:
+ recommended_event = issue.get_recommended_event()
+ if recommended_event:
+ environment = recommended_event.get_environment()
+ if environment and environment.name:
+ return f" in `{environment.name}`"
+ except Exception as e:
+ # If anything goes wrong, just continue without environment info
+ logger.info(
+ "get_environment_info.no-environment",
+ extra={"issue_id": issue.id, "error": e},
+ )
+ return ""
+
+ @staticmethod
+ def get_merged_pr_single_issue_template(title: str, url: str, environment: str) -> str:
+ truncated_title = PRCommentWorkflow._truncate_title(title)
+ return MERGED_PR_SINGLE_ISSUE_TEMPLATE.format(
+ title=truncated_title,
+ url=url,
+ environment=environment,
+ )
+
class OpenPRCommentWorkflow(ABC):
def __init__(self, integration: CommitContextIntegration):
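
Note: a quick rendering of the new template with a truncated title and the environment suffix (hypothetical issue data; empty `environment` when none is found):

```python
ISSUE_TITLE_MAX_LENGTH = 50
MERGED_PR_SINGLE_ISSUE_TEMPLATE = "* ‼️ [**{title}**]({url}){environment}\n"

def truncate_title(title: str, max_length: int = ISSUE_TITLE_MAX_LENGTH) -> str:
    if len(title) <= max_length:
        return title
    return title[:max_length].rstrip() + "..."

line = MERGED_PR_SINGLE_ISSUE_TEMPLATE.format(
    title=truncate_title("DatabaseError: connection timeout while executing long query"),
    url="https://example.sentry.io/issues/12345/",  # hypothetical URL
    environment=" in `production`",
)
print(line)
# * ‼️ [**DatabaseError: connection timeout while executing...**](https://example.sentry.io/issues/12345/) in `production`
```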
diff --git a/src/sentry/issues/endpoints/browser_reporting_collector.py b/src/sentry/issues/endpoints/browser_reporting_collector.py
index 2bcf8976511..52fdcde5eeb 100644
--- a/src/sentry/issues/endpoints/browser_reporting_collector.py
+++ b/src/sentry/issues/endpoints/browser_reporting_collector.py
@@ -1,13 +1,14 @@
from __future__ import annotations
import logging
-from dataclasses import dataclass
-from typing import Any, Literal
+from typing import Any
-from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
+from rest_framework import serializers
from rest_framework.parsers import JSONParser
from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.status import HTTP_200_OK, HTTP_404_NOT_FOUND, HTTP_422_UNPROCESSABLE_ENTITY
from sentry import options
from sentry.api.api_owners import ApiOwner
@@ -17,30 +18,46 @@
logger = logging.getLogger(__name__)
-# Known browser report types as defined by the Browser Reporting API specification
-BrowserReportType = Literal[
- # Core report types (always sent to 'default' endpoint)
- "deprecation", # Deprecated API usage
- "intervention", # Browser interventions/blocks
- "crash", # Browser crashes
- # Policy violation report types (can be sent to named endpoints)
- "csp-violation", # Content Security Policy violations
- "coep", # Cross-Origin-Embedder-Policy violations
- "coop", # Cross-Origin-Opener-Policy violations
- "document-policy-violation", # Document Policy violations
- "permissions-policy", # Permissions Policy violations
+BROWSER_REPORT_TYPES = [
+ "deprecation",
+ "intervention",
+ "crash",
+ "csp-violation",
+ "coep",
+ "coop",
+ "document-policy-violation",
+ "permissions-policy",
]
-@dataclass
-class BrowserReport:
- body: dict[str, Any]
- type: BrowserReportType
- url: str
- user_agent: str
- destination: str
- timestamp: int
- attempts: int
+# Working Draft https://www.w3.org/TR/reporting-1/#concept-reports
+# Editor's Draft https://w3c.github.io/reporting/#concept-reports
+# We need to support both
+class BrowserReportSerializer(serializers.Serializer[Any]):
+ """Serializer for validating browser report data structure."""
+
+ body = serializers.DictField()
+ type = serializers.ChoiceField(choices=BROWSER_REPORT_TYPES)
+ url = serializers.URLField()
+ user_agent = serializers.CharField()
+ destination = serializers.CharField()
+ attempts = serializers.IntegerField(min_value=1)
+ # Fields that appear in only one of the two specs; we accept either form
+ age = serializers.IntegerField(required=False)
+ timestamp = serializers.IntegerField(required=False, min_value=0)
+
+ def validate_timestamp(self, value: int) -> int:
+ """Reject the report if it carries both timestamp and age."""
+ if "age" in self.initial_data:
+ raise serializers.ValidationError("If timestamp is present, age must be absent")
+ return value
+
+ def validate_age(self, value: int) -> int:
+ """Reject the report if it carries both age and timestamp."""
+ if "timestamp" in self.initial_data:
+ raise serializers.ValidationError("If age is present, timestamp must be absent")
+ return value
class BrowserReportsJSONParser(JSONParser):
@@ -63,17 +80,15 @@ class BrowserReportingCollectorEndpoint(Endpoint):
permission_classes = ()
# Support both standard JSON and browser reporting API content types
parser_classes = [BrowserReportsJSONParser, JSONParser]
- publish_status = {
- "POST": ApiPublishStatus.PRIVATE,
- }
+ publish_status = {"POST": ApiPublishStatus.PRIVATE}
owner = ApiOwner.ISSUES
# CSRF exemption and CORS support required for Browser Reporting API
@csrf_exempt
@allow_cors_options
- def post(self, request: Request, *args: Any, **kwargs: Any) -> HttpResponse:
+ def post(self, request: Request, *args: Any, **kwargs: Any) -> Response:
if not options.get("issues.browser_reporting.collector_endpoint_enabled"):
- return HttpResponse(status=404)
+ return Response(status=HTTP_404_NOT_FOUND)
logger.info("browser_report_received", extra={"request_body": request.data})
@@ -86,14 +101,30 @@ def post(self, request: Request, *args: Any, **kwargs: Any) -> HttpResponse:
"browser_report_invalid_format",
extra={"data_type": type(raw_data).__name__, "data": raw_data},
)
- return HttpResponse(status=422)
+ return Response(status=HTTP_422_UNPROCESSABLE_ENTITY)
+ # Validate each report in the array
+ validated_reports = []
for report in raw_data:
- browser_report = BrowserReport(**report)
+ serializer = BrowserReportSerializer(data=report)
+ if not serializer.is_valid():
+ logger.warning(
+ "browser_report_validation_failed",
+ extra={"validation_errors": serializer.errors, "raw_report": report},
+ )
+ return Response(
+ {"error": "Invalid report data", "details": serializer.errors},
+ status=HTTP_422_UNPROCESSABLE_ENTITY,
+ )
+
+ validated_reports.append(serializer.validated_data)
+
+ # Process all validated reports
+ for browser_report in validated_reports:
metrics.incr(
"browser_reporting.raw_report_received",
- tags={"browser_report_type": browser_report.type},
+ tags={"browser_report_type": str(browser_report["type"])},
sample_rate=1.0, # XXX: Remove this once we have a ballpark figure
)
- return HttpResponse(status=200)
+ return Response(status=HTTP_200_OK)
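
Note: the serializer enforces the Working Draft / Editor's Draft split: a report may carry `timestamp` or `age`, not both. A hedged sketch of the behavior (hypothetical payload; assumes the `BrowserReportSerializer` from this diff is importable and a Django settings context is configured):

```python
payload = {
    "body": {"id": "navigator.vendorSub"},
    "type": "deprecation",
    "url": "https://example.com/page",
    "user_agent": "Mozilla/5.0",
    "destination": "default",
    "attempts": 1,
    "age": 1200,              # Editor's Draft field
    "timestamp": 1700000000,  # Working Draft field -- conflicts with age
}

serializer = BrowserReportSerializer(data=payload)
assert not serializer.is_valid()
# Both cross-field validators fire, one keyed to each offending field:
# {'timestamp': ['If timestamp is present, age must be absent'],
#  'age': ['If age is present, timestamp must be absent']}

del payload["age"]
assert BrowserReportSerializer(data=payload).is_valid()
```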
diff --git a/src/sentry/issues/grouptype.py b/src/sentry/issues/grouptype.py
index 9680891ee13..e32414acf98 100644
--- a/src/sentry/issues/grouptype.py
+++ b/src/sentry/issues/grouptype.py
@@ -511,6 +511,7 @@ class DBQueryInjectionVulnerabilityGroupType(GroupType):
category_v2 = GroupCategory.DB_QUERY.value
enable_auto_resolve = False
enable_escalation_detection = False
+ noise_config = NoiseConfig(ignore_limit=5)
default_priority = PriorityLevel.MEDIUM
diff --git a/src/sentry/migrations/0917_convert_org_saved_searches_to_views.py b/src/sentry/migrations/0917_convert_org_saved_searches_to_views.py
index 83d1f9b6637..e8718423086 100644
--- a/src/sentry/migrations/0917_convert_org_saved_searches_to_views.py
+++ b/src/sentry/migrations/0917_convert_org_saved_searches_to_views.py
@@ -5,30 +5,15 @@
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
-from sentry.models.savedsearch import Visibility
from sentry.new_migrations.migrations import CheckedMigration
-from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
def convert_org_saved_searches_to_views(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
- SavedSearch = apps.get_model("sentry", "SavedSearch")
- GroupSearchView = apps.get_model("sentry", "GroupSearchView")
-
- org_saved_searches = SavedSearch.objects.filter(visibility=Visibility.ORGANIZATION)
-
- for saved_search in RangeQuerySetWrapperWithProgressBar(org_saved_searches):
- GroupSearchView.objects.update_or_create(
- organization=saved_search.organization,
- user_id=saved_search.owner_id,
- name=saved_search.name,
- defaults={
- "query": saved_search.query,
- "query_sort": saved_search.sort,
- "date_added": saved_search.date_added,
- },
- )
+ # This migration had an error and was never run.
+ # See 0921_convert_org_saved_searches_to_views_rerevised.py for the correct migration.
+ return
class Migration(CheckedMigration):
diff --git a/src/sentry/migrations/0920_convert_org_saved_searches_to_views_revised.py b/src/sentry/migrations/0920_convert_org_saved_searches_to_views_revised.py
index 10ca0ba7251..c3cf88fda38 100644
--- a/src/sentry/migrations/0920_convert_org_saved_searches_to_views_revised.py
+++ b/src/sentry/migrations/0920_convert_org_saved_searches_to_views_revised.py
@@ -4,32 +4,15 @@
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
-from sentry.models.savedsearch import Visibility
from sentry.new_migrations.migrations import CheckedMigration
-from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
def convert_org_saved_searches_to_views(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
- SavedSearch = apps.get_model("sentry", "SavedSearch")
- GroupSearchView = apps.get_model("sentry", "GroupSearchView")
-
- org_saved_searches = SavedSearch.objects.filter(
- visibility=Visibility.ORGANIZATION, owner_id__isnull=False
- )
-
- for saved_search in RangeQuerySetWrapperWithProgressBar(org_saved_searches):
- GroupSearchView.objects.update_or_create(
- organization=saved_search.organization,
- user_id=saved_search.owner_id,
- name=saved_search.name,
- defaults={
- "query": saved_search.query,
- "query_sort": saved_search.sort,
- "date_added": saved_search.date_added,
- },
- )
+ # This migration had an error and was never run.
+ # See 0921_convert_org_saved_searches_to_views_rerevised.py for the correct migration.
+ return
class Migration(CheckedMigration):
diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py
index 98d4051a870..a48369b5a9d 100644
--- a/src/sentry/options/defaults.py
+++ b/src/sentry/options/defaults.py
@@ -2624,11 +2624,6 @@
default=0.0,
flags=FLAG_ADMIN_MODIFIABLE | FLAG_AUTOMATOR_MODIFIABLE | FLAG_RATE,
)
-register(
- "grouping.experiments.parameterization.traceparent",
- default=0.0,
- flags=FLAG_ADMIN_MODIFIABLE | FLAG_AUTOMATOR_MODIFIABLE | FLAG_RATE,
-)
# TODO: For now, only a small number of projects are going through a grouping config transition at
# any given time, so we're sampling at 100% in order to be able to get good signal. Once we've fully
diff --git a/src/sentry/preprod/__init__.py b/src/sentry/preprod/__init__.py
index e69de29bb2d..32860f7f157 100644
--- a/src/sentry/preprod/__init__.py
+++ b/src/sentry/preprod/__init__.py
@@ -0,0 +1 @@
+from .analytics import * # NOQA
diff --git a/src/sentry/preprod/analytics.py b/src/sentry/preprod/analytics.py
new file mode 100644
index 00000000000..96ea66e74f1
--- /dev/null
+++ b/src/sentry/preprod/analytics.py
@@ -0,0 +1,14 @@
+from sentry import analytics
+
+
+class PreprodArtifactApiAssembleEvent(analytics.Event):
+ type = "preprod_artifact.api.assemble"
+
+ attributes = (
+ analytics.Attribute("organization_id"),
+ analytics.Attribute("project_id"),
+ analytics.Attribute("user_id", required=False),
+ )
+
+
+analytics.register(PreprodArtifactApiAssembleEvent)
diff --git a/src/sentry/preprod/api/endpoints/organization_preprod_artifact_assemble.py b/src/sentry/preprod/api/endpoints/organization_preprod_artifact_assemble.py
index 20b1b2e68f7..c3ecef72cad 100644
--- a/src/sentry/preprod/api/endpoints/organization_preprod_artifact_assemble.py
+++ b/src/sentry/preprod/api/endpoints/organization_preprod_artifact_assemble.py
@@ -4,7 +4,7 @@
from rest_framework.request import Request
from rest_framework.response import Response
-from sentry import features
+from sentry import analytics, features
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
@@ -77,6 +77,14 @@ def post(self, request: Request, project) -> Response:
"""
Assembles a preprod artifact (mobile build, etc.) and stores it in the database.
"""
+
+ analytics.record(
+ "preprod_artifact.api.assemble",
+ organization_id=project.organization_id,
+ project_id=project.id,
+ user_id=request.user.id,
+ )
+
if not features.has(
"organizations:preprod-artifact-assemble", project.organization, actor=request.user
):
diff --git a/src/sentry/projectoptions/defaults.py b/src/sentry/projectoptions/defaults.py
index 23f793aa099..d96cf2062a9 100644
--- a/src/sentry/projectoptions/defaults.py
+++ b/src/sentry/projectoptions/defaults.py
@@ -202,7 +202,7 @@
register(key="sentry:tempest_fetch_dumps", default=False)
# Should autofix run automatically on new issues
-register(key="sentry:autofix_automation_tuning", default="low")
+register(key="sentry:autofix_automation_tuning", default="off")
# Should seer scanner run automatically on new issues
register(key="sentry:seer_scanner_automation", default=False)
diff --git a/src/sentry/replays/endpoints/project_replay_summarize_breadcrumbs.py b/src/sentry/replays/endpoints/project_replay_summarize_breadcrumbs.py
index 8dd77f455b5..733b1211293 100644
--- a/src/sentry/replays/endpoints/project_replay_summarize_breadcrumbs.py
+++ b/src/sentry/replays/endpoints/project_replay_summarize_breadcrumbs.py
@@ -1,7 +1,7 @@
import functools
import logging
from collections.abc import Generator, Iterator
-from typing import Any
+from typing import Any, TypedDict
import requests
import sentry_sdk
@@ -10,13 +10,17 @@
from rest_framework.request import Request
from rest_framework.response import Response
-from sentry import features
+from sentry import features, nodestore
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.paginator import GenericOffsetPaginator
+from sentry.eventstore.models import Event
+from sentry.models.project import Project
from sentry.replays.lib.storage import RecordingSegmentStorageMeta, storage
+from sentry.replays.post_process import process_raw_response
+from sentry.replays.query import query_replay_instance
from sentry.replays.usecases.ingest.event_parser import as_log_message
from sentry.replays.usecases.reader import fetch_segments_metadata, iter_segment_data
from sentry.seer.signed_seer_api import sign_with_seer_secret
@@ -25,6 +29,14 @@
logger = logging.getLogger(__name__)
+class ErrorEvent(TypedDict):
+ id: str
+ title: str
+ message: str
+ timestamp: float
+ category: str
+
+
@region_silo_endpoint
@extend_schema(tags=["Replays"])
class ProjectReplaySummarizeBreadcrumbsEndpoint(ProjectEndpoint):
@@ -37,7 +49,7 @@ def __init__(self, **options) -> None:
storage.initialize_client()
super().__init__(**options)
- def get(self, request: Request, project, replay_id: str) -> Response:
+ def get(self, request: Request, project: Project, replay_id: str) -> Response:
"""Return a collection of replay recording segments."""
if (
not features.has(
@@ -52,17 +64,117 @@ def get(self, request: Request, project, replay_id: str) -> Response:
):
return self.respond(status=404)
+ filter_params = self.get_filter_params(request, project)
+
+ # Fetch the replay's error IDs from the replay_id.
+ snuba_response = query_replay_instance(
+ project_id=project.id,
+ replay_id=replay_id,
+ start=filter_params["start"],
+ end=filter_params["end"],
+ organization=project.organization,
+ request_user_id=request.user.id,
+ )
+
+ response = process_raw_response(
+ snuba_response,
+ fields=request.query_params.getlist("field"),
+ )
+
+ error_ids = response[0].get("error_ids", []) if response else []
+
+ # Check if error fetching should be disabled
+ disable_error_fetching = (
+ request.query_params.get("enable_error_context", "true").lower() == "false"
+ )
+
+ if disable_error_fetching:
+ error_events = []
+ else:
+ error_events = fetch_error_details(project_id=project.id, error_ids=error_ids)
+
return self.paginate(
request=request,
paginator_cls=GenericOffsetPaginator,
data_fn=functools.partial(fetch_segments_metadata, project.id, replay_id),
- on_results=analyze_recording_segments,
+ on_results=functools.partial(analyze_recording_segments, error_events),
)
+def fetch_error_details(project_id: int, error_ids: list[str]) -> list[ErrorEvent]:
+ """Fetch error details given error IDs and return a list of ErrorEvent objects."""
+ try:
+ node_ids = [Event.generate_node_id(project_id, event_id=id) for id in error_ids]
+ events = nodestore.backend.get_multi(node_ids)
+
+ return [
+ ErrorEvent(
+ category="error",
+ id=event_id,
+ title=data.get("title", ""),
+ timestamp=data.get("timestamp", 0.0),
+ message=data.get("message", ""),
+ )
+ for event_id, node_id in zip(error_ids, node_ids)
+ if (data := events.get(node_id)) is not None
+ ]
+ except Exception as e:
+ sentry_sdk.capture_exception(e)
+ return []
+
+
+def generate_error_log_message(error: ErrorEvent) -> str:
+ title = error["title"]
+ message = error["message"]
+ timestamp = error["timestamp"]
+
+ return f"User experienced an error: '{title}: {message}' at {timestamp}"
+
+
+def get_request_data(
+ iterator: Iterator[tuple[int, memoryview]], error_events: list[ErrorEvent]
+) -> list[str]:
+ # Sort error events by timestamp
+ error_events.sort(key=lambda x: x["timestamp"])
+ return list(gen_request_data(iterator, error_events))
+
+
+def gen_request_data(
+ iterator: Iterator[tuple[int, memoryview]], error_events: list[ErrorEvent]
+) -> Generator[str]:
+ """Generate log messages from events and errors in chronological order."""
+ error_idx = 0
+
+ # Process segments
+ for _, segment in iterator:
+ events = json.loads(segment.tobytes().decode("utf-8"))
+ for event in events:
+ # Check if we need to yield any error messages that occurred before this event
+ while error_idx < len(error_events) and error_events[error_idx][
+ "timestamp"
+ ] < event.get("timestamp", 0):
+ error = error_events[error_idx]
+ yield generate_error_log_message(error)
+ error_idx += 1
+
+ # Yield the current event's log message
+ if message := as_log_message(event):
+ yield message
+
+ # Yield any remaining error messages
+ while error_idx < len(error_events):
+ error = error_events[error_idx]
+ yield generate_error_log_message(error)
+ error_idx += 1
+
+
@sentry_sdk.trace
-def analyze_recording_segments(segments: list[RecordingSegmentStorageMeta]) -> dict[str, Any]:
- request_data = json.dumps({"logs": get_request_data(iter_segment_data(segments))})
+def analyze_recording_segments(
+ error_events: list[ErrorEvent],
+ segments: list[RecordingSegmentStorageMeta],
+) -> dict[str, Any]:
+ # Combine breadcrumbs and error details
+ request_data = json.dumps({"logs": get_request_data(iter_segment_data(segments), error_events)})
# XXX: I have to deserialize this request so it can be "automatically" reserialized by the
# paginate method. This is less than ideal.
@@ -94,15 +206,3 @@ def make_seer_request(request_data: str) -> bytes:
response.raise_for_status()
return response.content
-
-
-def get_request_data(iterator: Iterator[tuple[int, memoryview]]) -> list[str]:
- return list(gen_request_data(map(lambda r: r[1], iterator)))
-
-
-def gen_request_data(segments: Iterator[memoryview]) -> Generator[str]:
- for segment in segments:
- for event in json.loads(segment.tobytes().decode("utf-8")):
- message = as_log_message(event)
- if message:
- yield message
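
Note: the generator merges two already-sorted streams (segment events and error events) by timestamp with a single error index, like the merge step of merge sort. A self-contained sketch of the ordering (hypothetical timestamps and log strings):

```python
error_events = [
    {"title": "TypeError", "message": "x is undefined", "timestamp": 5.0},
]
segment_events = [
    {"timestamp": 1.0, "log": "clicked Checkout"},
    {"timestamp": 10.0, "log": "navigated to /cart"},
]

logs: list[str] = []
error_idx = 0
for event in segment_events:
    # Flush any errors that happened before this event.
    while error_idx < len(error_events) and error_events[error_idx]["timestamp"] < event["timestamp"]:
        err = error_events[error_idx]
        logs.append(f"User experienced an error: '{err['title']}: {err['message']}' at {err['timestamp']}")
        error_idx += 1
    logs.append(event["log"])
# Trailing errors (after the last event) are flushed the same way.

assert logs == [
    "clicked Checkout",
    "User experienced an error: 'TypeError: x is undefined' at 5.0",
    "navigated to /cart",
]
```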
diff --git a/src/sentry/replays/usecases/delete.py b/src/sentry/replays/usecases/delete.py
index a0ee4783db1..91b7ed40590 100644
--- a/src/sentry/replays/usecases/delete.py
+++ b/src/sentry/replays/usecases/delete.py
@@ -84,6 +84,11 @@ def _delete_if_exists(filename: str) -> None:
def _make_recording_filenames(project_id: int, row: MatchedRow) -> list[str]:
+ # Null segment_ids can cause this to fail. If no segments were ingested, we can skip
+ # deleting the segments.
+ if row["max_segment_id"] is None:
+ return []
+
# We assume every segment between 0 and the max_segment_id exists. It's a waste of time to
# delete a non-existent segment but it's not so significant that we'd want to query ClickHouse
# to verify it exists.
@@ -104,7 +109,7 @@ def _make_recording_filenames(project_id: int, row: MatchedRow) -> list[str]:
class MatchedRow(TypedDict):
retention_days: int
replay_id: str
- max_segment_id: int
+ max_segment_id: int | None
platform: str
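
Note: the guard covers replays whose rows exist but never ingested a segment (`max_segment_id` is NULL in ClickHouse). A hedged sketch of the effect — the filename scheme below is illustrative, not the real storage layout:

```python
def make_recording_filenames(project_id: int, row: dict) -> list[str]:
    # No segments ingested: nothing to delete.
    if row["max_segment_id"] is None:
        return []
    # Otherwise assume every segment 0..max_segment_id exists.
    return [
        f"{row['retention_days']}/{project_id}/{row['replay_id']}/{segment_id}"  # illustrative path
        for segment_id in range(row["max_segment_id"] + 1)
    ]

row = {"retention_days": 30, "replay_id": "ab12", "platform": "javascript", "max_segment_id": None}
assert make_recording_filenames(1, row) == []
assert len(make_recording_filenames(1, {**row, "max_segment_id": 2})) == 3
```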
diff --git a/src/sentry/snuba/ourlogs.py b/src/sentry/snuba/ourlogs.py
index 76eb87e0978..9b355333d92 100644
--- a/src/sentry/snuba/ourlogs.py
+++ b/src/sentry/snuba/ourlogs.py
@@ -154,6 +154,7 @@ def run_top_events_timeseries_query(
referrer: str,
config: SearchResolverConfig,
sampling_mode: SAMPLING_MODES | None,
+ equations: list[str] | None = None,
) -> Any:
return rpc_dataset_common.run_top_events_timeseries_query(
get_resolver=get_resolver,
@@ -166,4 +167,5 @@ def run_top_events_timeseries_query(
referrer=referrer,
config=config,
sampling_mode=sampling_mode,
+ equations=equations,
)
diff --git a/src/sentry/tasks/auth/check_auth.py b/src/sentry/tasks/auth/check_auth.py
index 4477e1f295c..e8adb5739e1 100644
--- a/src/sentry/tasks/auth/check_auth.py
+++ b/src/sentry/tasks/auth/check_auth.py
@@ -73,7 +73,9 @@ def check_auth_identity(auth_identity_id: int, **kwargs):
name="sentry.tasks.check_auth_identities",
queue="auth.control",
silo_mode=SiloMode.CONTROL,
- taskworker_config=TaskworkerConfig(namespace=auth_control_tasks),
+ taskworker_config=TaskworkerConfig(
+ namespace=auth_control_tasks, processing_deadline_duration=60
+ ),
)
def check_auth_identities(
auth_identity_id: int | None = None,
diff --git a/src/sentry/workflow_engine/endpoints/validators/base/detector.py b/src/sentry/workflow_engine/endpoints/validators/base/detector.py
index 1732507064c..a32020860d5 100644
--- a/src/sentry/workflow_engine/endpoints/validators/base/detector.py
+++ b/src/sentry/workflow_engine/endpoints/validators/base/detector.py
@@ -5,6 +5,7 @@
from rest_framework import serializers
from sentry import audit_log
+from sentry.api.fields.actor import ActorField
from sentry.api.serializers.rest_framework import CamelSnakeSerializer
from sentry.issues import grouptype
from sentry.issues.grouptype import GroupType
@@ -33,6 +34,7 @@ class BaseDetectorTypeValidator(CamelSnakeSerializer):
)
type = serializers.CharField()
config = serializers.JSONField(default={})
+ owner = ActorField(required=False, allow_null=True)
def validate_type(self, value: str) -> builtins.type[GroupType]:
type = grouptype.registry.get_by_slug(value)
@@ -60,6 +62,22 @@ def data_conditions(self) -> BaseDataConditionValidator:
def update(self, instance: Detector, validated_data: dict[str, Any]):
instance.name = validated_data.get("name", instance.name)
instance.type = validated_data.get("detector_type", instance.group_type).slug
+
+ # Handle owner field update
+ if "owner" in validated_data:
+ owner = validated_data.get("owner")
+ if owner:
+ if owner.is_user:
+ instance.owner_user_id = owner.id
+ instance.owner_team_id = None
+ elif owner.is_team:
+ instance.owner_user_id = None
+ instance.owner_team_id = owner.id
+ else:
+ # Clear owner if None is passed
+ instance.owner_user_id = None
+ instance.owner_team_id = None
+
condition_group = validated_data.pop("condition_group")
data_conditions: list[DataConditionType] = condition_group.get("conditions")
@@ -98,12 +116,24 @@ def create(self, validated_data):
type=condition["type"],
condition_group=condition_group,
)
+
+ owner = validated_data.get("owner")
+ owner_user_id = None
+ owner_team_id = None
+ if owner:
+ if owner.is_user:
+ owner_user_id = owner.id
+ elif owner.is_team:
+ owner_team_id = owner.id
+
detector = Detector.objects.create(
project_id=self.context["project"].id,
name=validated_data["name"],
workflow_condition_group=condition_group,
type=validated_data["type"].slug,
config=validated_data.get("config", {}),
+ owner_user_id=owner_user_id,
+ owner_team_id=owner_team_id,
created_by_id=self.context["request"].user.id,
)
DataSourceDetector.objects.create(data_source=detector_data_source, detector=detector)
diff --git a/src/sentry/workflow_engine/processors/delayed_workflow.py b/src/sentry/workflow_engine/processors/delayed_workflow.py
index 07847099891..f627d90d036 100644
--- a/src/sentry/workflow_engine/processors/delayed_workflow.py
+++ b/src/sentry/workflow_engine/processors/delayed_workflow.py
@@ -354,13 +354,12 @@ def get_condition_query_groups(
data_condition_groups: list[DataConditionGroup],
event_data: EventRedisData,
workflows_to_envs: Mapping[WorkflowId, int | None],
+ dcg_to_slow_conditions: dict[DataConditionGroupId, list[DataCondition]],
) -> dict[UniqueConditionQuery, set[GroupId]]:
"""
Map unique condition queries to the group IDs that need to be checked for that query.
"""
condition_groups: dict[UniqueConditionQuery, set[GroupId]] = defaultdict(set)
- dcg_to_slow_conditions = get_slow_conditions_for_groups(list(event_data.dcg_to_groups.keys()))
-
for dcg in data_condition_groups:
slow_conditions = dcg_to_slow_conditions[dcg.id]
workflow_id = event_data.dcg_to_workflow.get(dcg.id)
@@ -412,9 +411,9 @@ def get_groups_to_fire(
workflows_to_envs: Mapping[WorkflowId, int | None],
event_data: EventRedisData,
condition_group_results: dict[UniqueConditionQuery, QueryResult],
+ dcg_to_slow_conditions: dict[DataConditionGroupId, list[DataCondition]],
) -> dict[GroupId, set[DataConditionGroup]]:
groups_to_fire: dict[GroupId, set[DataConditionGroup]] = defaultdict(set)
- dcg_to_slow_conditions = get_slow_conditions_for_groups(list(event_data.dcg_ids))
for dcg in data_condition_groups:
slow_conditions = dcg_to_slow_conditions[dcg.id]
@@ -581,7 +580,7 @@ def fire_actions_for_groups(
extra={
"workflow_ids": [workflow.id for workflow in workflows],
"actions": [action.id for action in filtered_actions],
- "event_data": event_data,
+ "event_data": workflow_event_data,
"event_id": workflow_event_data.event.event_id,
},
)
@@ -650,6 +649,18 @@ def process_delayed_workflows(
workflows_to_envs = fetch_workflows_envs(list(event_data.workflow_ids))
data_condition_groups = fetch_data_condition_groups(list(event_data.dcg_ids))
+ dcg_to_slow_conditions = get_slow_conditions_for_groups(list(event_data.dcg_ids))
+
+ no_slow_condition_groups = {
+ dcg_id for dcg_id, slow_conds in dcg_to_slow_conditions.items() if not slow_conds
+ }
+ if no_slow_condition_groups:
+ # If the DCG is being processed here, it's because we thought it had a slow condition.
+ # If any don't seem to have a slow condition now, that's interesting enough to log.
+ logger.info(
+ "delayed_workflow.no_slow_condition_groups",
+ extra={"no_slow_condition_groups": sorted(no_slow_condition_groups)},
+ )
logger.info(
"delayed_workflow.workflows",
@@ -661,7 +672,7 @@ def process_delayed_workflows(
# Get unique query groups to query Snuba
condition_groups = get_condition_query_groups(
- data_condition_groups, event_data, workflows_to_envs
+ data_condition_groups, event_data, workflows_to_envs, dcg_to_slow_conditions
)
if not condition_groups:
return
@@ -688,6 +699,7 @@ def process_delayed_workflows(
workflows_to_envs,
event_data,
condition_group_results,
+ dcg_to_slow_conditions,
)
logger.info(
"delayed_workflow.groups_to_fire",
diff --git a/src/sentry/workflow_engine/processors/workflow.py b/src/sentry/workflow_engine/processors/workflow.py
index c1683e5e100..10521166584 100644
--- a/src/sentry/workflow_engine/processors/workflow.py
+++ b/src/sentry/workflow_engine/processors/workflow.py
@@ -1,4 +1,3 @@
-import logging
from collections.abc import Collection, Mapping
from dataclasses import asdict, dataclass, replace
from enum import StrEnum
@@ -33,7 +32,7 @@
from sentry.workflow_engine.utils import log_context
from sentry.workflow_engine.utils.metrics import metrics_incr
-logger = logging.getLogger(__name__)
+logger = log_context.get_logger(__name__)
WORKFLOW_ENGINE_BUFFER_LIST_KEY = "workflow_engine_delayed_processing_buffer"
diff --git a/static/app/components/codeSnippet.tsx b/static/app/components/codeSnippet.tsx
index e6f1fba79a2..4f7ffbe50f4 100644
--- a/static/app/components/codeSnippet.tsx
+++ b/static/app/components/codeSnippet.tsx
@@ -4,6 +4,7 @@ import styled from '@emotion/styled';
import Prism from 'prismjs';
import {Button} from 'sentry/components/core/button';
+import {Flex} from 'sentry/components/core/layout';
import {IconCopy} from 'sentry/icons';
import {t} from 'sentry/locale';
import {space} from 'sentry/styles/space';
@@ -163,12 +164,12 @@ export function CodeSnippet({
))}
-
+
)}
{icon}
{filename && {filename}}
- {!hasTabs && }
+ {!hasTabs && }
{!hideCopyButton && (
`
: ''}
`;
-const FlexSpacer = styled('div')`
- flex-grow: 1;
-`;
-
const CopyButton = styled(Button)<{isAlwaysVisible: boolean}>`
color: var(--prism-comment);
transition: opacity 0.1s ease-out;
diff --git a/static/app/components/codecov/branchSelector/branchSelector.tsx b/static/app/components/codecov/branchSelector/branchSelector.tsx
index 9ead2764b47..e469f5b11b2 100644
--- a/static/app/components/codecov/branchSelector/branchSelector.tsx
+++ b/static/app/components/codecov/branchSelector/branchSelector.tsx
@@ -5,6 +5,7 @@ import styled from '@emotion/styled';
import {useCodecovContext} from 'sentry/components/codecov/context/codecovContext';
import type {SelectOption} from 'sentry/components/core/compactSelect';
import {CompactSelect} from 'sentry/components/core/compactSelect';
+import {Flex} from 'sentry/components/core/layout';
import DropdownButton from 'sentry/components/dropdownButton';
import {t} from 'sentry/locale';
import {space} from 'sentry/styles/space';
@@ -60,12 +61,12 @@ export function BranchSelector() {
{...triggerProps}
>
-
+
{branch || t('Select branch')}
-
+
);
@@ -95,12 +96,6 @@ const OptionLabel = styled('span')`
}
`;
-const FlexContainer = styled('div')`
- display: flex;
- align-items: center;
- gap: ${space(0.75)};
-`;
-
const IconContainer = styled('div')`
flex: 1 0 14px;
height: 14px;
diff --git a/static/app/components/codecov/datePicker/dateSelector.tsx b/static/app/components/codecov/datePicker/dateSelector.tsx
index 4089cab8f2b..de590aeca6d 100644
--- a/static/app/components/codecov/datePicker/dateSelector.tsx
+++ b/static/app/components/codecov/datePicker/dateSelector.tsx
@@ -3,6 +3,7 @@ import styled from '@emotion/styled';
import type {SelectOption, SingleSelectProps} from 'sentry/components/core/compactSelect';
import {CompactSelect} from 'sentry/components/core/compactSelect';
+import {Flex} from 'sentry/components/core/layout';
import DropdownButton from 'sentry/components/dropdownButton';
import {getArbitraryRelativePeriod} from 'sentry/components/timeRangeSelector/utils';
import {IconCalendar} from 'sentry/icons/iconCalendar';
@@ -80,10 +81,10 @@ export function DateSelector({relativeDate, onChange, trigger}: DateSelectorProp
{...triggerProps}
>
-
+
{defaultLabel}
-
+
);
@@ -108,9 +109,3 @@ const OptionLabel = styled('span')`
margin: 0;
}
`;
-
-const FlexContainer = styled('div')`
- display: flex;
- align-items: center;
- gap: ${space(0.75)};
-`;
diff --git a/static/app/components/codecov/integratedOrgSelector/integratedOrgSelector.tsx b/static/app/components/codecov/integratedOrgSelector/integratedOrgSelector.tsx
index 1def6fd99c4..6fe564343cd 100644
--- a/static/app/components/codecov/integratedOrgSelector/integratedOrgSelector.tsx
+++ b/static/app/components/codecov/integratedOrgSelector/integratedOrgSelector.tsx
@@ -6,6 +6,7 @@ import {useCodecovContext} from 'sentry/components/codecov/context/codecovContex
import {LinkButton} from 'sentry/components/core/button/linkButton';
import type {SelectOption} from 'sentry/components/core/compactSelect';
import {CompactSelect} from 'sentry/components/core/compactSelect';
+import {Flex} from 'sentry/components/core/layout';
import DropdownButton from 'sentry/components/dropdownButton';
import {IconAdd, IconInfo} from 'sentry/icons';
import {t} from 'sentry/locale';
@@ -33,7 +34,7 @@ function OrgFooterMessage() {
-
+
@@ -43,7 +44,7 @@ function OrgFooterMessage() {
Ensure you log in to the same GitHub identity
-
+
);
}
@@ -95,14 +96,14 @@ export function IntegratedOrgSelector() {
{...triggerProps}
>
-
+
{integratedOrg || t('Select integrated organization')}
-
+
);
@@ -160,21 +161,6 @@ const MenuFooterDivider = styled('div')`
}
`;
-const FlexContainer = styled('div')`
- display: flex;
- flex-direction: row;
- justify-content: flex-start;
- gap: ${space(1)};
-`;
-
-const TriggerFlexContainer = styled('div')`
- display: flex;
- flex-direction: row;
- justify-content: flex-start;
- gap: ${space(0.75)};
- align-items: center;
-`;
-
const IconContainer = styled('div')`
flex: 1 0 14px;
height: 14px;
diff --git a/static/app/components/codecov/repoPicker/repoSelector.tsx b/static/app/components/codecov/repoPicker/repoSelector.tsx
index c26916626ca..55f967834ca 100644
--- a/static/app/components/codecov/repoPicker/repoSelector.tsx
+++ b/static/app/components/codecov/repoPicker/repoSelector.tsx
@@ -4,6 +4,7 @@ import styled from '@emotion/styled';
import {Button} from 'sentry/components/core/button';
import type {SelectOption, SingleSelectProps} from 'sentry/components/core/compactSelect';
import {CompactSelect} from 'sentry/components/core/compactSelect';
+import {Flex} from 'sentry/components/core/layout';
import DropdownButton from 'sentry/components/dropdownButton';
import Link from 'sentry/components/links/link';
import {IconInfo, IconSync} from 'sentry/icons';
@@ -118,12 +119,12 @@ export function RepoSelector({onChange, trigger, repository}: RepoSelectorProps)
{...triggerProps}
>
-
+
{defaultLabel}
-
+
);
@@ -178,12 +179,6 @@ const OptionLabel = styled('span')`
}
`;
-const FlexContainer = styled('div')`
- display: flex;
- align-items: center;
- gap: ${space(0.75)};
-`;
-
const IconContainer = styled('div')`
flex: 1 0 14px;
height: 14px;
diff --git a/static/app/components/core/button/styles.chonk.tsx b/static/app/components/core/button/styles.chonk.tsx
index b387f05c403..d8c351493db 100644
--- a/static/app/components/core/button/styles.chonk.tsx
+++ b/static/app/components/core/button/styles.chonk.tsx
@@ -110,7 +110,7 @@ export function DO_NOT_USE_getChonkButtonStyles(
borderRadius: 'inherit',
border: `1px solid ${getChonkButtonTheme(type, p.theme).background}`,
transform: `translateY(-${chonkElevation(p.size)})`,
- transition: 'transform 0.1s ease-in-out',
+ transition: 'transform 0.06s ease-in-out',
},
'&:focus-visible': {
diff --git a/static/app/components/events/eventAttachments.tsx b/static/app/components/events/eventAttachments.tsx
index 2c1a90cd320..51fa18dbfda 100644
--- a/static/app/components/events/eventAttachments.tsx
+++ b/static/app/components/events/eventAttachments.tsx
@@ -6,6 +6,7 @@ import {
useFetchEventAttachments,
} from 'sentry/actionCreators/events';
import {LinkButton} from 'sentry/components/core/button/linkButton';
+import {Flex} from 'sentry/components/core/layout';
import EventAttachmentActions from 'sentry/components/events/eventAttachmentActions';
import FileSize from 'sentry/components/fileSize';
import LoadingError from 'sentry/components/loadingError';
@@ -139,9 +140,10 @@ function EventAttachmentsContent({
>
{attachments.map(attachment => (
-
+
{attachment.name}
-
+
+
@@ -198,12 +200,6 @@ const StyledPanelTable = styled(PanelTable)`
grid-template-columns: 1fr auto auto;
`;
-const FlexCenter = styled('div')`
- ${p => p.theme.overflowEllipsis};
- display: flex;
- align-items: center;
-`;
-
const Name = styled('div')`
${p => p.theme.overflowEllipsis};
white-space: nowrap;
diff --git a/static/app/components/events/groupingInfo/groupingVariant.tsx b/static/app/components/events/groupingInfo/groupingVariant.tsx
index 1843b1b7047..47d3d174db8 100644
--- a/static/app/components/events/groupingInfo/groupingVariant.tsx
+++ b/static/app/components/events/groupingInfo/groupingVariant.tsx
@@ -100,68 +100,19 @@ function GroupingVariant({event, showGroupingConfig, variant}: GroupingVariantPr
switch (variant.type) {
case EventGroupVariantType.COMPONENT:
component = variant.component;
- data.push([
- t('Type'),
-
- {variant.type}
-
- ,
- ]);
+
if (showGroupingConfig && variant.config?.id) {
data.push([t('Grouping Config'), variant.config.id]);
}
break;
case EventGroupVariantType.CUSTOM_FINGERPRINT:
- data.push([
- t('Type'),
-
- {variant.type}
-
- ,
- ]);
addFingerprintInfo(data, variant);
break;
case EventGroupVariantType.BUILT_IN_FINGERPRINT:
- data.push([
- t('Type'),
-
- {variant.type}
-
- ,
- ]);
addFingerprintInfo(data, variant);
break;
case EventGroupVariantType.SALTED_COMPONENT:
component = variant.component;
- data.push([
- t('Type'),
-
- {variant.type}
-
- ,
- ]);
addFingerprintInfo(data, variant);
if (showGroupingConfig && variant.config?.id) {
data.push([t('Grouping Config'), variant.config.id]);
@@ -173,19 +124,6 @@ function GroupingVariant({event, showGroupingConfig, variant}: GroupingVariantPr
.find((c): c is EntrySpans => c.type === 'spans')
?.data?.map((span: RawSpanType) => [span.span_id, span.hash]) ?? []
);
- data.push([
- t('Type'),
-
- {variant.type}
-
- ,
- ]);
data.push(['Performance Issue Type', variant.key]);
data.push(['Span Operation', variant.evidence.op]);
diff --git a/static/app/components/events/interfaces/spans/newTraceDetailsHeader.tsx b/static/app/components/events/interfaces/spans/newTraceDetailsHeader.tsx
index e80d3f639f3..aafc5f6ddef 100644
--- a/static/app/components/events/interfaces/spans/newTraceDetailsHeader.tsx
+++ b/static/app/components/events/interfaces/spans/newTraceDetailsHeader.tsx
@@ -1,6 +1,7 @@
import {Fragment} from 'react';
import styled from '@emotion/styled';
+import {Flex} from 'sentry/components/core/layout';
import {generateStats} from 'sentry/components/events/opsBreakdown';
import {DividerSpacer} from 'sentry/components/performance/waterfall/miniHeader';
import {t} from 'sentry/locale';
@@ -32,18 +33,18 @@ function ServiceBreakdown({
if (!displayBreakdown) {
return (
-
+
{t('server side')}
-
+
{'N/A'}
-
-
-
+
+
+
{t('client side')}
-
+
{'N/A'}
-
-
+
+
);
}
@@ -57,20 +58,20 @@ function ServiceBreakdown({
return httpDuration ? (
-
+
{t('server side')}
-
+
{getDuration(httpDuration, 2, true)}
{serverSidePct}%
-
-
-
+
+
+
{t('client side')}
-
+
{getDuration(totalDuration - httpDuration, 2, true)}
{clientSidePct}%
-
-
+
+
) : null;
}
@@ -151,18 +152,10 @@ const Pct = styled('div')`
font-variant-numeric: tabular-nums;
`;
-const FlexBox = styled('div')`
+const BreakDownWrapper = styled('div')`
display: flex;
-`;
-
-const BreakDownWrapper = styled(FlexBox)`
flex-direction: column;
padding: ${space(2)};
`;
-const BreakDownRow = styled(FlexBox)`
- align-items: center;
- justify-content: space-between;
-`;
-
export default TraceViewHeader;
diff --git a/static/app/components/feedback/feedbackSummary.tsx b/static/app/components/feedback/feedbackSummary.tsx
new file mode 100644
index 00000000000..8cf3c23dcb8
--- /dev/null
+++ b/static/app/components/feedback/feedbackSummary.tsx
@@ -0,0 +1,64 @@
+import styled from '@emotion/styled';
+
+import useFeedbackSummary from 'sentry/components/feedback/list/useFeedbackSummary';
+import Placeholder from 'sentry/components/placeholder';
+import {IconSeer} from 'sentry/icons/iconSeer';
+import {t} from 'sentry/locale';
+import {space} from 'sentry/styles/space';
+import useOrganization from 'sentry/utils/useOrganization';
+
+export default function FeedbackSummary() {
+ const {isError, isPending, summary, tooFewFeedbacks} = useFeedbackSummary();
+
+ const organization = useOrganization();
+
+ if (
+ !organization.features.includes('user-feedback-ai-summaries') ||
+ tooFewFeedbacks ||
+ isError
+ ) {
+ return null;
+ }
+
+ if (isPending) {
+ return <Placeholder />;
+ }
+
+ return (
+ <SummaryIconContainer>
+ <IconSeer />
+ <SummaryContainer>
+ <SummaryHeader>{t('Feedback Summary')}</SummaryHeader>
+ <SummaryContent>{summary}</SummaryContent>
+ </SummaryContainer>
+ </SummaryIconContainer>
+ );
+}
+
+const SummaryContainer = styled('div')`
+ display: flex;
+ flex-direction: column;
+ gap: ${space(1)};
+ width: 100%;
+`;
+
+const SummaryHeader = styled('p')`
+ font-size: ${p => p.theme.fontSizeMedium};
+ font-weight: ${p => p.theme.fontWeightBold};
+ margin: 0;
+`;
+
+const SummaryContent = styled('p')`
+ font-size: ${p => p.theme.fontSizeSmall};
+ color: ${p => p.theme.subText};
+ margin: 0;
+`;
+
+const SummaryIconContainer = styled('div')`
+ display: flex;
+ gap: ${space(1)};
+ padding: ${space(2)};
+ border: 1px solid ${p => p.theme.border};
+ border-radius: ${p => p.theme.borderRadius};
+ align-items: baseline;
+`;
diff --git a/static/app/components/feedback/list/useFeedbackSummary.tsx b/static/app/components/feedback/list/useFeedbackSummary.tsx
new file mode 100644
index 00000000000..e593e29b0cc
--- /dev/null
+++ b/static/app/components/feedback/list/useFeedbackSummary.tsx
@@ -0,0 +1,67 @@
+import {normalizeDateTimeParams} from 'sentry/components/organizations/pageFilters/parse';
+import {useApiQuery} from 'sentry/utils/queryClient';
+import useOrganization from 'sentry/utils/useOrganization';
+import usePageFilters from 'sentry/utils/usePageFilters';
+
+type FeedbackSummaryResponse = {
+ numFeedbacksUsed: number;
+ success: boolean;
+ summary: string | null;
+};
+
+export default function useFeedbackSummary(): {
+ isError: boolean;
+ isPending: boolean;
+ summary: string | null;
+ tooFewFeedbacks: boolean;
+} {
+ const organization = useOrganization();
+
+ const {selection} = usePageFilters();
+
+ const normalizedDateRange = normalizeDateTimeParams(selection.datetime);
+
+ const {data, isPending, isError} = useApiQuery<FeedbackSummaryResponse>(
+ [
+ `/organizations/${organization.slug}/feedback-summary/`,
+ {
+ query: {
+ ...normalizedDateRange,
+ project: selection.projects,
+ },
+ },
+ ],
+ {
+ staleTime: 5000,
+ enabled:
+ Boolean(normalizedDateRange) &&
+ organization.features.includes('user-feedback-ai-summaries'),
+ retry: 1,
+ }
+ );
+
+ if (isPending) {
+ return {
+ summary: null,
+ isPending: true,
+ isError: false,
+ tooFewFeedbacks: false,
+ };
+ }
+
+ if (isError) {
+ return {
+ summary: null,
+ isPending: false,
+ isError: true,
+ tooFewFeedbacks: false,
+ };
+ }
+
+ return {
+ summary: data.summary,
+ isPending: false,
+ isError: false,
+ tooFewFeedbacks: data.numFeedbacksUsed === 0 && !data.success,
+ };
+}
diff --git a/static/app/components/group/times.tsx b/static/app/components/group/times.tsx
index c4f9d28f1d8..e18e4c6fc0a 100644
--- a/static/app/components/group/times.tsx
+++ b/static/app/components/group/times.tsx
@@ -1,6 +1,8 @@
import {Fragment} from 'react';
import styled from '@emotion/styled';
+import {Flex} from 'sentry/components/core/layout';
+import TextOverflow from 'sentry/components/textOverflow';
import TimeSince from 'sentry/components/timeSince';
import {IconClock} from 'sentry/icons';
import {t} from 'sentry/locale';
@@ -19,20 +21,28 @@ type Props = {
function Times({lastSeen, firstSeen}: Props) {
return (
-
+
{lastSeen && (
-
-
+
+
+
+
)}
{firstSeen && lastSeen && (
—
)}
{firstSeen && (
-
+
+
+
)}
-
+
);
}
@@ -42,18 +52,11 @@ const Container = styled('div')`
min-width: 0; /* flex-hack for overflow-ellipsised children */
`;
-const FlexWrapper = styled('div')`
- ${p => p.theme.overflowEllipsis}
-
- /* The following aligns the icon with the text, fixes bug in Firefox */
- display: flex;
- align-items: center;
-`;
-
const StyledIconClock = styled(IconClock)`
/* this is solely for optics, since TimeSince always begins
with a number, and numbers do not have descenders */
margin-right: ${space(0.5)};
+ min-width: 12px;
`;
export default Times;
diff --git a/static/app/components/replays/breadcrumbs/breadcrumbItem.tsx b/static/app/components/replays/breadcrumbs/breadcrumbItem.tsx
index ee0d9b79488..5d98fc59cbb 100644
--- a/static/app/components/replays/breadcrumbs/breadcrumbItem.tsx
+++ b/static/app/components/replays/breadcrumbs/breadcrumbItem.tsx
@@ -111,7 +111,7 @@ function BreadcrumbItem({
onShowSnippet();
e.preventDefault();
e.stopPropagation();
- trackAnalytics('replay.view_html', {
+ trackAnalytics('replay.view-html', {
organization,
breadcrumb_type: 'category' in frame ? frame.category : 'unknown',
});
diff --git a/static/app/components/replays/timeAndScrubberGrid.tsx b/static/app/components/replays/timeAndScrubberGrid.tsx
index e4375dab72e..f325db0faa6 100644
--- a/static/app/components/replays/timeAndScrubberGrid.tsx
+++ b/static/app/components/replays/timeAndScrubberGrid.tsx
@@ -1,4 +1,4 @@
-import {useRef} from 'react';
+import {useCallback, useRef} from 'react';
import {css} from '@emotion/react';
import styled from '@emotion/styled';
@@ -14,10 +14,12 @@ import {useReplayContext} from 'sentry/components/replays/replayContext';
import {IconAdd, IconSubtract} from 'sentry/icons';
import {t} from 'sentry/locale';
import {space} from 'sentry/styles/space';
+import {trackAnalytics} from 'sentry/utils/analytics';
import useTimelineScale, {
TimelineScaleContextProvider,
} from 'sentry/utils/replays/hooks/useTimelineScale';
import {useReplayPrefs} from 'sentry/utils/replays/playback/providers/replayPreferencesContext';
+import useOrganization from 'sentry/utils/useOrganization';
type TimeAndScrubberGridProps = {
isCompact?: boolean;
@@ -27,10 +29,27 @@ type TimeAndScrubberGridProps = {
function TimelineSizeBar({isLoading}: {isLoading?: boolean}) {
const {replay} = useReplayContext();
+ const organization = useOrganization();
const [timelineScale, setTimelineScale] = useTimelineScale();
const durationMs = replay?.getDurationMs();
const maxScale = durationMs ? Math.ceil(durationMs / 60000) : 10;
+ const handleZoomOut = useCallback(() => {
+ const newScale = Math.max(timelineScale - 1, 1);
+ setTimelineScale(newScale);
+ trackAnalytics('replay.timeline.zoom-out', {
+ organization,
+ });
+ }, [timelineScale, setTimelineScale, organization]);
+
+ const handleZoomIn = useCallback(() => {
+ const newScale = Math.min(timelineScale + 1, maxScale);
+ setTimelineScale(newScale);
+ trackAnalytics('replay.timeline.zoom-in', {
+ organization,
+ });
+ }, [timelineScale, maxScale, setTimelineScale, organization]);
+
return (
}
borderless
- onClick={() => setTimelineScale(Math.max(timelineScale - 1, 1))}
+ onClick={handleZoomOut}
aria-label={t('Zoom out')}
disabled={timelineScale === 1 || isLoading}
/>
@@ -51,7 +70,7 @@ function TimelineSizeBar({isLoading}: {isLoading?: boolean}) {
title={t('Zoom in')}
icon={<IconAdd />}
borderless
- onClick={() => setTimelineScale(Math.min(timelineScale + 1, maxScale))}
+ onClick={handleZoomIn}
aria-label={t('Zoom in')}
disabled={timelineScale === maxScale || isLoading}
/>
diff --git a/static/app/components/scrollCarousel.tsx b/static/app/components/scrollCarousel.tsx
index a517d7c5876..65bc94c064e 100644
--- a/static/app/components/scrollCarousel.tsx
+++ b/static/app/components/scrollCarousel.tsx
@@ -201,7 +201,7 @@ const RightMask = styled('div')<{transparentMask: boolean}>`
right: 0;
background: ${p =>
p.transparentMask
- ? 'linear-gradient(to right, rgba(255, 255, 255, 0), rgba(255, 255, 255, 1))'
+ ? `linear-gradient(to right, transparent, ${p.theme.background})`
: `linear-gradient(
270deg,
${p.theme.background} 50%,
diff --git a/static/app/utils/analytics/replayAnalyticsEvents.tsx b/static/app/utils/analytics/replayAnalyticsEvents.tsx
index 2b72bd20577..a48ef1bef36 100644
--- a/static/app/utils/analytics/replayAnalyticsEvents.tsx
+++ b/static/app/utils/analytics/replayAnalyticsEvents.tsx
@@ -113,6 +113,8 @@ export type ReplayEventParameters = {
'replay.search': {
search_keys: string;
};
+ 'replay.timeline.zoom-in': Record<string, unknown>;
+ 'replay.timeline.zoom-out': Record<string, unknown>;
'replay.toggle-fullscreen': {
context: string;
fullscreen: boolean;
@@ -153,6 +155,8 @@ export const replayEventMap: Record<ReplayEventKey, string> = {
'replay.render-issues-group-list': 'Render Issues Detail Replay List',
'replay.render-missing-replay-alert': 'Render Missing Replay Alert',
'replay.search': 'Searched Replay',
+ 'replay.timeline.zoom-in': 'Zoomed In Replay Timeline',
+ 'replay.timeline.zoom-out': 'Zoomed Out Replay Timeline',
'replay.toggle-fullscreen': 'Toggled Replay Fullscreen',
'replay.view-html': 'Clicked "View HTML" in Replay Breadcrumb',
};
diff --git a/static/app/views/alerts/list/rules/alertRuleStatus.tsx b/static/app/views/alerts/list/rules/alertRuleStatus.tsx
index 5eb3f250275..7e0a9d5649a 100644
--- a/static/app/views/alerts/list/rules/alertRuleStatus.tsx
+++ b/static/app/views/alerts/list/rules/alertRuleStatus.tsx
@@ -1,5 +1,6 @@
import styled from '@emotion/styled';
+import {Flex} from 'sentry/components/core/layout';
import {IconArrow, IconMute, IconNot} from 'sentry/icons';
import {t} from 'sentry/locale';
import {space} from 'sentry/styles/space';
@@ -88,7 +89,7 @@ export default function AlertRuleStatus({rule}: Props) {
}
return (
-
+
{rule.detectionType !== AlertRuleComparisonType.DYNAMIC && (
)}
@@ -109,7 +110,7 @@ export default function AlertRuleStatus({rule}: Props) {
)}
)}
-
+
);
}
@@ -125,9 +126,3 @@ const TriggerText = styled('div')`
white-space: nowrap;
font-variant-numeric: tabular-nums;
`;
-
-// TODO: explore utilizing the FlexContainer from app/components/container/flex.tsx
-const FlexCenter = styled('div')`
- display: flex;
- align-items: center;
-`;
diff --git a/static/app/views/alerts/list/rules/row.tsx b/static/app/views/alerts/list/rules/row.tsx
index 1bc99ea39ba..e780d3441ec 100644
--- a/static/app/views/alerts/list/rules/row.tsx
+++ b/static/app/views/alerts/list/rules/row.tsx
@@ -10,6 +10,7 @@ import {
CompactSelect,
type SelectOptionOrSection,
} from 'sentry/components/core/compactSelect';
+import {Flex} from 'sentry/components/core/layout';
import {Tooltip} from 'sentry/components/core/tooltip';
import type {MenuItemProps} from 'sentry/components/dropdownMenu';
import {DropdownMenu} from 'sentry/components/dropdownMenu';
@@ -265,26 +266,26 @@ function RuleListRow({
-
-
+
+
-
+
{!isUptime && !isCron && (
)}
-
-
+
+
-
+
-
+
{ownerActor ? (
) : (
@@ -311,7 +312,7 @@ function RuleListRow({
)}
)}
-
+
{({hasAccess}) => (
@@ -333,12 +334,6 @@ function RuleListRow({
);
}
-// TODO: see static/app/components/profiling/flex.tsx and utilize the FlexContainer styled component
-const FlexCenter = styled('div')`
- display: flex;
- align-items: center;
-`;
-
const AlertNameWrapper = styled('div')<{isIssueAlert?: boolean}>`
${p => p.theme.overflowEllipsis}
display: flex;
diff --git a/static/app/views/codecov/tests/onboardingSteps/addUploadToken.tsx b/static/app/views/codecov/tests/onboardingSteps/addUploadToken.tsx
index 784c65b9700..23320d27250 100644
--- a/static/app/views/codecov/tests/onboardingSteps/addUploadToken.tsx
+++ b/static/app/views/codecov/tests/onboardingSteps/addUploadToken.tsx
@@ -1,9 +1,9 @@
import {Fragment, useState} from 'react';
-import styled from '@emotion/styled';
import {CodeSnippet} from 'sentry/components/codeSnippet';
import {Alert} from 'sentry/components/core/alert';
import {Button} from 'sentry/components/core/button';
+import {Flex} from 'sentry/components/core/layout';
import Link from 'sentry/components/links/link';
import {IconClose} from 'sentry/icons';
import {t, tct} from 'sentry/locale';
@@ -74,26 +74,26 @@ export function AddUploadToken({step}: AddUploadTokenProps) {
)}
-
-
+
+
SENTRY_PREVENT_TOKEN
{FULL_TOKEN}
-
+
-
+
) : (
-
-
+
+
SENTRY_PREVENT_TOKEN
{TRUNCATED_TOKEN}
-
+
-
+
)
) : (
-
+
Delete client
-
+
)}
{errorMessage && {errorMessage}
}
@@ -180,9 +181,3 @@ const StyledButton = styled(Button)`
margin-top: 20px;
margin-bottom: 15px;
`;
-
-const FlexDiv = styled('div')`
- display: flex;
- width: 100%;
- justify-content: right;
-`;
diff --git a/tests/js/fixtures/tabularColumn.ts b/tests/js/fixtures/tabularColumn.ts
new file mode 100644
index 00000000000..d80626514e3
--- /dev/null
+++ b/tests/js/fixtures/tabularColumn.ts
@@ -0,0 +1,11 @@
+import type {TabularColumn} from 'sentry/views/dashboards/widgets/common/types';
+
+export function TabularColumnFixture(params: Partial<TabularColumn>): TabularColumn {
+ return {
+ key: 'column_key',
+ name: 'column_name',
+ type: 'string',
+ width: -1,
+ ...params,
+ };
+}
diff --git a/tests/js/fixtures/tabularColumns.ts b/tests/js/fixtures/tabularColumns.ts
new file mode 100644
index 00000000000..a3e09352600
--- /dev/null
+++ b/tests/js/fixtures/tabularColumns.ts
@@ -0,0 +1,9 @@
+import {TabularColumnFixture} from 'sentry-fixture/tabularColumn';
+
+import type {TabularColumn} from 'sentry/views/dashboards/widgets/common/types';
+
+export function TabularColumnsFixture(
+ params: Array<Partial<TabularColumn>>
+): TabularColumn[] {
+ return params.map((param: Partial<TabularColumn>) => TabularColumnFixture(param));
+}
diff --git a/tests/sentry/api/endpoints/test_browser_reporting_collector.py b/tests/sentry/api/endpoints/test_browser_reporting_collector.py
index ccafd5e68a8..2220472a5e9 100644
--- a/tests/sentry/api/endpoints/test_browser_reporting_collector.py
+++ b/tests/sentry/api/endpoints/test_browser_reporting_collector.py
@@ -1,11 +1,46 @@
+from copy import deepcopy
from unittest.mock import MagicMock, patch
from django.urls import reverse
from rest_framework import status
+from rest_framework.response import Response
from sentry.testutils.cases import APITestCase
from sentry.testutils.helpers.options import override_options
+# Working Draft format
+DEPRECATION_REPORT = {
+ "body": {
+ "columnNumber": 12,
+ "id": "RangeExpand",
+ "lineNumber": 31,
+ "message": "Range.expand() is deprecated. Please use Selection.modify() instead.",
+ "sourceFile": "https://dogs.are.great/_next/static/chunks/_4667019e._.js",
+ },
+ "type": "deprecation",
+ "url": "https://dogs.are.great/",
+ "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
+ "destination": "default",
+ "timestamp": 1640995200000, # January 1, 2022 in milliseconds
+ "attempts": 1,
+}
+
+# Editor's Draft format
+INTERVENTION_REPORT = {
+ "body": {
+ "id": "NavigatorVibrate",
+ "message": "The vibrate() method is deprecated.",
+ "sourceFile": "https://dogs.are.great/app.js",
+ "lineNumber": 45,
+ },
+ "type": "intervention",
+ "url": "https://dogs.are.great/page2",
+ "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
+ "destination": "default",
+ "age": 2,
+ "attempts": 1,
+}
+
class BrowserReportingCollectorEndpointTest(APITestCase):
endpoint = "sentry-api-0-reporting-api-experiment"
@@ -14,43 +49,23 @@ def setUp(self) -> None:
super().setUp()
self.url = reverse(self.endpoint)
- self.report_data = [
- {
- "body": {
- "columnNumber": 12,
- "id": "RangeExpand",
- "lineNumber": 31,
- "message": "Range.expand() is deprecated. Please use Selection.modify() instead.",
- "sourceFile": "https://dogs.are.great/_next/static/chunks/_4667019e._.js",
- },
- "type": "deprecation",
- "url": "https://dogs.are.great/",
- "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
- "destination": "default",
- "timestamp": 1640995200000, # January 1, 2022 in milliseconds
- "attempts": 1,
- }
- ]
+ self.report_data = [DEPRECATION_REPORT]
+
+ def assert_invalid_report_data(self, response: Response, details: dict[str, list[str]]) -> None:
+ assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
+ response_data = response.json() # type: ignore[attr-defined]
+ assert response_data["error"] == "Invalid report data"
+ assert response_data["details"] == details
def test_404s_by_default(self) -> None:
response = self.client.post(self.url, self.report_data)
-
assert response.status_code == status.HTTP_404_NOT_FOUND
@override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
@patch("sentry.issues.endpoints.browser_reporting_collector.metrics.incr")
- @patch("sentry.issues.endpoints.browser_reporting_collector.logger.info")
- def test_logs_request_data_if_option_enabled(
- self, mock_logger_info: MagicMock, mock_metrics_incr: MagicMock
- ) -> None:
- response = self.client.post(
- self.url, self.report_data, content_type="application/reports+json"
- )
-
+ def test_basic(self, mock_metrics_incr: MagicMock) -> None:
+ response = self.client.post(self.url, self.report_data)
assert response.status_code == status.HTTP_200_OK
- mock_logger_info.assert_any_call(
- "browser_report_received", extra={"request_body": self.report_data}
- )
mock_metrics_incr.assert_any_call(
"browser_reporting.raw_report_received",
tags={"browser_report_type": "deprecation"},
@@ -62,7 +77,6 @@ def test_logs_request_data_if_option_enabled(
def test_rejects_invalid_content_type(self, mock_metrics_incr: MagicMock) -> None:
"""Test that the endpoint rejects invalid content type and does not call the browser reporting metric"""
response = self.client.post(self.url, self.report_data, content_type="bad/type/json")
-
assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
# Verify that the browser_reporting.raw_report_received metric was not called
# Check that none of the calls were for the browser_reporting.raw_report_received metric
@@ -71,51 +85,11 @@ def test_rejects_invalid_content_type(self, mock_metrics_incr: MagicMock) -> Non
@override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
@patch("sentry.issues.endpoints.browser_reporting_collector.metrics.incr")
- @patch("sentry.issues.endpoints.browser_reporting_collector.logger.info")
- def test_handles_multiple_reports(
- self, mock_logger_info: MagicMock, mock_metrics_incr: MagicMock
- ) -> None:
+ def test_handles_multiple_reports_both_specs(self, mock_metrics_incr: MagicMock) -> None:
"""Test that the endpoint handles multiple reports in a single request"""
- multiple_reports = [
- {
- "body": {
- "columnNumber": 12,
- "id": "RangeExpand",
- "lineNumber": 31,
- "message": "Range.expand() is deprecated. Please use Selection.modify() instead.",
- "sourceFile": "https://dogs.are.great/_next/static/chunks/_4667019e._.js",
- },
- "type": "deprecation",
- "url": "https://dogs.are.great/",
- "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
- "destination": "default",
- "timestamp": 1640995200000,
- "attempts": 1,
- },
- {
- "body": {
- "id": "NavigatorVibrate",
- "message": "The vibrate() method is deprecated.",
- "sourceFile": "https://dogs.are.great/app.js",
- "lineNumber": 45,
- },
- "type": "intervention",
- "url": "https://dogs.are.great/page2",
- "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
- "destination": "default",
- "timestamp": 1640995260000,
- "attempts": 1,
- },
- ]
-
- response = self.client.post(
- self.url, multiple_reports, content_type="application/reports+json"
- )
-
+ multiple_reports = [DEPRECATION_REPORT, INTERVENTION_REPORT]
+ response = self.client.post(self.url, multiple_reports)
assert response.status_code == status.HTTP_200_OK
- mock_logger_info.assert_any_call(
- "browser_report_received", extra={"request_body": multiple_reports}
- )
# Should record metrics for each report type
mock_metrics_incr.assert_any_call(
"browser_reporting.raw_report_received",
@@ -127,3 +101,75 @@ def test_handles_multiple_reports(
tags={"browser_report_type": "intervention"},
sample_rate=1.0,
)
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_missing_required_fields(self) -> None:
+ """Test that missing required fields are properly validated"""
+ report = deepcopy(DEPRECATION_REPORT)
+ del report["user_agent"]
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(response, {"user_agent": ["This field is required."]})
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_invalid_report_type(self) -> None:
+ """Test that invalid report types are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["type"] = "invalid-type"
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(
+ response,
+ {"type": ['"invalid-type" is not a valid choice.']},
+ )
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_invalid_url(self) -> None:
+ """Test that invalid URLs are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["url"] = "not-a-valid-url"
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(response, {"url": ["Enter a valid URL."]})
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_invalid_timestamp(self) -> None:
+ """Test that invalid timestamps are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["timestamp"] = -1
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(
+ response, {"timestamp": ["Ensure this value is greater than or equal to 0."]}
+ )
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_invalid_attempts(self) -> None:
+ """Test that invalid attempts values are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["attempts"] = 0
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(
+ response, {"attempts": ["Ensure this value is greater than or equal to 1."]}
+ )
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_rejects_non_dict_body(self) -> None:
+ """Test that non-dict body values are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["body"] = "not-a-dict"
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(
+ response,
+ {"body": ['Expected a dictionary of items but got type "str".']},
+ )
+
+ @override_options({"issues.browser_reporting.collector_endpoint_enabled": True})
+ def test_mixed_fields(self) -> None:
+ """Test that mixed fields are rejected"""
+ report = deepcopy(DEPRECATION_REPORT)
+ report["age"] = 1
+ response = self.client.post(self.url, [report])
+ self.assert_invalid_report_data(
+ response,
+ {
+ "age": ["If age is present, timestamp must be absent"],
+ "timestamp": ["If timestamp is present, age must be absent"],
+ },
+ )
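
The new failure-mode tests above pin down a validation contract for the collector: `body` must be a dict, `type` comes from a closed set, `url` must parse, `timestamp` must be >= 0, `attempts` >= 1, and `age` (Editor's Draft) and `timestamp` (Working Draft) are mutually exclusive. A minimal DRF-style sketch of that contract, inferred from the assertions rather than copied from the endpoint's actual serializer:

```python
from rest_framework import serializers


class BrowserReportSerializer(serializers.Serializer):
    """Sketch of the validation rules the tests above exercise."""

    body = serializers.DictField()
    # The tests only exercise these two types; the real choice set may be larger.
    type = serializers.ChoiceField(choices=["deprecation", "intervention"])
    url = serializers.URLField()
    user_agent = serializers.CharField()
    destination = serializers.CharField()
    timestamp = serializers.IntegerField(required=False, min_value=0)
    age = serializers.IntegerField(required=False)
    attempts = serializers.IntegerField(min_value=1)

    def validate(self, data):
        # The two spec drafts are mutually exclusive: a report carries either
        # an absolute timestamp in ms (Working Draft) or a relative age in ms
        # (Editor's Draft), never both.
        if "age" in data and "timestamp" in data:
            raise serializers.ValidationError(
                {
                    "age": ["If age is present, timestamp must be absent"],
                    "timestamp": ["If timestamp is present, age must be absent"],
                }
            )
        return data
```

Normalizing the two formats is then a matter of subtracting `age` from the receive time when no `timestamp` is present; the fixtures at the top of the file cover one report of each shape.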
diff --git a/tests/sentry/api/endpoints/test_project_details.py b/tests/sentry/api/endpoints/test_project_details.py
index 411612fe4f9..6be04241249 100644
--- a/tests/sentry/api/endpoints/test_project_details.py
+++ b/tests/sentry/api/endpoints/test_project_details.py
@@ -2065,7 +2065,7 @@ def test_autofix_automation_tuning(self):
"trigger-autofix-on-issue-summary feature enabled"
in resp.data["autofixAutomationTuning"][0]
)
- assert self.project.get_option("sentry:autofix_automation_tuning") == "low" # default
+ assert self.project.get_option("sentry:autofix_automation_tuning") == "off" # default
# Test with feature flag but invalid value - should fail
with self.feature("organizations:trigger-autofix-on-issue-summary"):
@@ -2073,7 +2073,7 @@ def test_autofix_automation_tuning(self):
self.org_slug, self.proj_slug, autofixAutomationTuning="invalid", status_code=400
)
assert '"invalid" is not a valid choice.' in resp.data["autofixAutomationTuning"][0]
- assert self.project.get_option("sentry:autofix_automation_tuning") == "low" # default
+ assert self.project.get_option("sentry:autofix_automation_tuning") == "off" # default
# Test with feature flag and valid value - should succeed
resp = self.get_success_response(
diff --git a/tests/sentry/api/serializers/test_project.py b/tests/sentry/api/serializers/test_project.py
index 5024c2e446f..6a0db58a44a 100644
--- a/tests/sentry/api/serializers/test_project.py
+++ b/tests/sentry/api/serializers/test_project.py
@@ -809,9 +809,9 @@ def test_toolbar_allowed_origins(self):
assert result["options"]["sentry:toolbar_allowed_origins"].split("\n") == origins
def test_autofix_automation_tuning_flag(self):
- # Default is "low"
+ # Default is "off"
result = serialize(self.project, self.user, DetailedProjectSerializer())
- assert result["autofixAutomationTuning"] == "low"
+ assert result["autofixAutomationTuning"] == "off"
# Update the value
self.project.update_option("sentry:autofix_automation_tuning", "high")
diff --git a/tests/sentry/grouping/test_parameterization.py b/tests/sentry/grouping/test_parameterization.py
index 09900621e09..3504abaf28e 100644
--- a/tests/sentry/grouping/test_parameterization.py
+++ b/tests/sentry/grouping/test_parameterization.py
@@ -1,12 +1,6 @@
-from unittest import mock
-
import pytest
-from sentry.grouping.parameterization import (
- ParameterizationRegexExperiment,
- Parameterizer,
- UniqueIdExperiment,
-)
+from sentry.grouping.parameterization import Parameterizer, UniqueIdExperiment
from sentry.grouping.strategies.message import REGEX_PATTERN_KEYS
@@ -227,43 +221,6 @@ def test_parameterize_experiment(name, input, expected, parameterizer):
assert experiments[0] == UniqueIdExperiment
-def test_parameterize_regex_experiment():
- """
- We don't have any of these yet, but we need to test that they work
- """
- FooExperiment = ParameterizationRegexExperiment(name="foo", raw_pattern=r"f[oO]{2}")
-
- parameterizer = Parameterizer(
- regex_pattern_keys=(),
- experiments=(FooExperiment,),
- )
- input_str = "blah foobarbaz fooooo"
- normalized = parameterizer.parameterize_all(input_str)
- assert normalized == "blah barbaz ooo"
- assert len(parameterizer.get_successful_experiments()) == 1
- assert parameterizer.get_successful_experiments()[0] == FooExperiment
-
-
-def test_parameterize_regex_experiment_cached_compiled():
-
- with mock.patch.object(
- ParameterizationRegexExperiment,
- "pattern",
- new_callable=mock.PropertyMock,
- return_value=r"(?Pf[oO]{2})",
- ) as mocked_pattern:
- FooExperiment = ParameterizationRegexExperiment(name="foo", raw_pattern=r"f[oO]{2}")
- parameterizer = Parameterizer(
- regex_pattern_keys=(),
- experiments=(FooExperiment,),
- )
- input_str = "blah foobarbaz fooooo"
- _ = parameterizer.parameterize_all(input_str)
- _ = parameterizer.parameterize_all(input_str)
-
- mocked_pattern.assert_called_once()
-
-
# These are test cases that we should fix
@pytest.mark.xfail()
@pytest.mark.parametrize(
diff --git a/tests/sentry/integrations/github/tasks/test_pr_comment.py b/tests/sentry/integrations/github/tasks/test_pr_comment.py
index 1067ef86de3..9ca79716608 100644
--- a/tests/sentry/integrations/github/tasks/test_pr_comment.py
+++ b/tests/sentry/integrations/github/tasks/test_pr_comment.py
@@ -338,7 +338,12 @@ def test_do_not_ignore_other_issues(self):
class TestGetCommentBody(GithubCommentTestCase):
def test_simple(self):
ev1 = self.store_event(
- data={"message": "issue 1", "culprit": "issue1", "fingerprint": ["group-1"]},
+ data={
+ "message": "issue 1",
+ "culprit": "issue1",
+ "fingerprint": ["group-1"],
+ "environment": "dev",
+ },
project_id=self.project.id,
)
assert ev1.group is not None
@@ -348,7 +353,12 @@ def test_simple(self):
)
assert ev2.group is not None
ev3 = self.store_event(
- data={"message": "issue 3", "culprit": "issue3", "fingerprint": ["group-3"]},
+ data={
+ "message": "issue 3",
+ "culprit": "issue3",
+ "fingerprint": ["group-3"],
+ "environment": "prod",
+ },
project_id=self.project.id,
)
assert ev3.group is not None
@@ -359,9 +369,12 @@ def test_simple(self):
expected_comment = f"""## Suspect Issues
This pull request was deployed and Sentry observed the following issues:
-- ‼️ **issue 1** `issue1` [View Issue](http://testserver/organizations/foo/issues/{ev1.group.id}/?referrer=github-pr-bot)
-- ‼️ **issue 2** `issue2` [View Issue](http://testserver/organizations/foo/issues/{ev2.group.id}/?referrer=github-pr-bot)
-- ‼️ **issue 3** `issue3` [View Issue](http://testserver/organizations/foo/issues/{ev3.group.id}/?referrer=github-pr-bot)
+* ‼️ [**issue 1**](http://testserver/organizations/{self.organization.slug}/issues/{ev1.group.id}/?referrer=github-pr-bot) in `dev`
+
+* ‼️ [**issue 2**](http://testserver/organizations/{self.organization.slug}/issues/{ev2.group.id}/?referrer=github-pr-bot)
+
+* ‼️ [**issue 3**](http://testserver/organizations/{self.organization.slug}/issues/{ev3.group.id}/?referrer=github-pr-bot) in `prod`
+
Did you find this useful? React with a 👍 or 👎"""
assert formatted_comment == expected_comment
@@ -384,7 +397,6 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
group_objs = Group.objects.order_by("id").all()
groups = [g.id for g in group_objs]
titles = [g.title for g in group_objs]
- culprits = [g.culprit for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
responses.add(
@@ -397,7 +409,7 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
github_comment_workflow(self.pr.id, self.project.id)
assert (
- f'"body": "## Suspect Issues\\nThis pull request was deployed and Sentry observed the following issues:\\n\\n- \\u203c\\ufe0f **{titles[0]}** `{culprits[0]}` [View Issue](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n- \\u203c\\ufe0f **{titles[1]}** `{culprits[1]}` [View Issue](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n\\nDid you find this useful? React with a \\ud83d\\udc4d or \\ud83d\\udc4e"'.encode()
+ f'"body": "## Suspect Issues\\nThis pull request was deployed and Sentry observed the following issues:\\n\\n* \\u203c\\ufe0f [**{titles[0]}**](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n\\n* \\u203c\\ufe0f [**{titles[1]}**](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n\\n\\nDid you find this useful? React with a \\ud83d\\udc4d or \\ud83d\\udc4e"'.encode()
in responses.calls[0].request.body
)
pull_request_comment_query = PullRequestComment.objects.all()
@@ -413,7 +425,9 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
@responses.activate
@freeze_time(datetime(2023, 6, 8, 0, 0, 0, tzinfo=UTC))
def test_comment_workflow_updates_comment(self, mock_metrics, mock_issues):
- groups = [g.id for g in Group.objects.all()]
+ group_objs = Group.objects.order_by("id").all()
+ groups = [g.id for g in group_objs]
+ titles = [g.title for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
pull_request_comment = PullRequestComment.objects.create(
external_id=1,
@@ -443,7 +457,7 @@ def test_comment_workflow_updates_comment(self, mock_metrics, mock_issues):
github_comment_workflow(self.pr.id, self.project.id)
assert (
- f'"body": "## Suspect Issues\\nThis pull request was deployed and Sentry observed the following issues:\\n\\n- \\u203c\\ufe0f **issue 1** `issue1` [View Issue](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n- \\u203c\\ufe0f **issue 2** `issue2` [View Issue](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n\\nDid you find this useful? React with a \\ud83d\\udc4d or \\ud83d\\udc4e"'.encode()
+ f'"body": "## Suspect Issues\\nThis pull request was deployed and Sentry observed the following issues:\\n\\n* \\u203c\\ufe0f [**{titles[0]}**](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n\\n* \\u203c\\ufe0f [**{titles[1]}**](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n\\n\\nDid you find this useful? React with a \\ud83d\\udc4d or \\ud83d\\udc4e"'.encode()
in responses.calls[0].request.body
)
pull_request_comment.refresh_from_db()
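
The expected bodies above reflect the new comment format: each suspect issue is a markdown bullet whose bolded title links straight to the issue (replacing the old backticked culprit plus a separate "View Issue" link), with the environment appended when the group has one. A sketch of rendering one bullet under those assumptions (the helper name is hypothetical):

```python
def format_suspect_issue_bullet(title: str, issue_url: str, environment: str | None) -> str:
    """Render one issue line in the new linked-title format used above."""
    bullet = f"* \u203c\ufe0f [**{title}**]({issue_url}?referrer=github-pr-bot)"
    if environment:
        bullet += f" in `{environment}`"
    return bullet


# format_suspect_issue_bullet("issue 1", "http://testserver/organizations/foo/issues/1/", "dev")
# -> '* ‼️ [**issue 1**](http://testserver/organizations/foo/issues/1/?referrer=github-pr-bot) in `dev`'
```

Bullets are then joined with blank lines, which is why the raw request-body assertions check for doubled `\n` sequences between issues.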
diff --git a/tests/sentry/integrations/gitlab/tasks/test_pr_comment.py b/tests/sentry/integrations/gitlab/tasks/test_pr_comment.py
index ec0731fe03e..bf61853d8cb 100644
--- a/tests/sentry/integrations/gitlab/tasks/test_pr_comment.py
+++ b/tests/sentry/integrations/gitlab/tasks/test_pr_comment.py
@@ -299,7 +299,12 @@ def test_do_not_ignore_other_issues(self):
class TestGetCommentBody(GitlabCommentTestCase):
def test_simple(self):
ev1 = self.store_event(
- data={"message": "issue 1", "culprit": "issue1", "fingerprint": ["group-1"]},
+ data={
+ "message": "issue 1",
+ "culprit": "issue1",
+ "fingerprint": ["group-1"],
+ "environment": "dev",
+ },
project_id=self.project.id,
)
assert ev1.group is not None
@@ -309,7 +314,12 @@ def test_simple(self):
)
assert ev2.group is not None
ev3 = self.store_event(
- data={"message": "issue 3", "culprit": "issue3", "fingerprint": ["group-3"]},
+ data={
+ "message": "issue 3",
+ "culprit": "issue3",
+ "fingerprint": ["group-3"],
+ "environment": "prod",
+ },
project_id=self.project.id,
)
assert ev3.group is not None
@@ -320,9 +330,12 @@ def test_simple(self):
expected_comment = f"""## Suspect Issues
This merge request was deployed and Sentry observed the following issues:
-- ‼️ **issue 1** `issue1` [View Issue](http://testserver/organizations/baz/issues/{ev1.group.id}/?referrer=gitlab-pr-bot)
-- ‼️ **issue 2** `issue2` [View Issue](http://testserver/organizations/baz/issues/{ev2.group.id}/?referrer=gitlab-pr-bot)
-- ‼️ **issue 3** `issue3` [View Issue](http://testserver/organizations/baz/issues/{ev3.group.id}/?referrer=gitlab-pr-bot)"""
+* ‼️ [**{ev1.group.title}**](http://testserver/organizations/{self.organization.slug}/issues/{ev1.group.id}/?referrer=gitlab-pr-bot) in `dev`
+
+* ‼️ [**{ev2.group.title}**](http://testserver/organizations/{self.organization.slug}/issues/{ev2.group.id}/?referrer=gitlab-pr-bot)
+
+* ‼️ [**{ev3.group.title}**](http://testserver/organizations/{self.organization.slug}/issues/{ev3.group.id}/?referrer=gitlab-pr-bot) in `prod`
+"""
assert formatted_comment == expected_comment
@@ -343,7 +356,6 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
group_objs = Group.objects.order_by("id").all()
groups = [g.id for g in group_objs]
titles = [g.title for g in group_objs]
- culprits = [g.culprit for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
responses.add(
@@ -360,8 +372,10 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
## Suspect Issues
This merge request was deployed and Sentry observed the following issues:
-- ‼️ **{titles[0]}** `{culprits[0]}` [View Issue](http://testserver/organizations/baz/issues/{groups[0]}/?referrer=gitlab-pr-bot)
-- ‼️ **{titles[1]}** `{culprits[1]}` [View Issue](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=gitlab-pr-bot)"""
+* ‼️ [**{titles[0]}**](http://testserver/organizations/{self.organization.slug}/issues/{groups[0]}/?referrer=gitlab-pr-bot)
+
+* ‼️ [**{titles[1]}**](http://testserver/organizations/{self.another_organization.slug}/issues/{groups[1]}/?referrer=gitlab-pr-bot)
+"""
}
pull_request_comment_query = PullRequestComment.objects.all()
@@ -377,7 +391,9 @@ def test_comment_workflow(self, mock_metrics, mock_issues):
@responses.activate
@freeze_time(datetime(2023, 6, 8, 0, 0, 0, tzinfo=UTC))
def test_comment_workflow_updates_comment(self, mock_metrics, mock_issues):
- groups = [g.id for g in Group.objects.all()]
+ group_objs = Group.objects.order_by("id").all()
+ groups = [g.id for g in group_objs]
+ titles = [g.title for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
pull_request_comment = PullRequestComment.objects.create(
external_id=1,
@@ -411,8 +427,10 @@ def test_comment_workflow_updates_comment(self, mock_metrics, mock_issues):
## Suspect Issues
This merge request was deployed and Sentry observed the following issues:
-- ‼️ **issue 1** `issue1` [View Issue](http://testserver/organizations/baz/issues/{groups[0]}/?referrer=gitlab-pr-bot)
-- ‼️ **issue 2** `issue2` [View Issue](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=gitlab-pr-bot)"""
+* ‼️ [**{titles[0]}**](http://testserver/organizations/{self.organization.slug}/issues/{groups[0]}/?referrer=gitlab-pr-bot)
+
+* ‼️ [**{titles[1]}**](http://testserver/organizations/{self.another_organization.slug}/issues/{groups[1]}/?referrer=gitlab-pr-bot)
+"""
}
pull_request_comment.refresh_from_db()
diff --git a/tests/sentry/migrations/test_0917_convert_org_saved_searches_to_views.py b/tests/sentry/migrations/test_0917_convert_org_saved_searches_to_views.py
deleted file mode 100644
index afe8595405f..00000000000
--- a/tests/sentry/migrations/test_0917_convert_org_saved_searches_to_views.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from sentry.models.groupsearchview import GroupSearchView
-from sentry.models.savedsearch import SavedSearch, Visibility
-from sentry.testutils.cases import TestMigrations
-
-
-class ConvertOrgSavedSearchesToViewsTest(TestMigrations):
- migrate_from = "0916_delete_open_period_rows"
- migrate_to = "0917_convert_org_saved_searches_to_views"
-
- def setup_initial_state(self):
- self.org = self.create_organization()
- self.user = self.create_user()
-
- self.org_saved_search = SavedSearch.objects.create(
- name="Org Saved Search",
- organization=self.org,
- owner_id=self.user.id,
- visibility=Visibility.ORGANIZATION,
- query="is:unresolved",
- )
-
- self.user_saved_search = SavedSearch.objects.create(
- name="User Saved Search",
- organization=self.org,
- owner_id=self.user.id,
- visibility=Visibility.OWNER,
- query="is:resolved",
- )
-
- def test_convert_org_saved_searches_to_views(self):
- assert GroupSearchView.objects.count() == 1
- org_view = GroupSearchView.objects.get(organization=self.org, user_id=self.user.id)
-
- assert org_view.query == self.org_saved_search.query
diff --git a/tests/sentry/replays/tasks/test_delete_replays_bulk.py b/tests/sentry/replays/tasks/test_delete_replays_bulk.py
index 91900904daa..9198556ac47 100644
--- a/tests/sentry/replays/tasks/test_delete_replays_bulk.py
+++ b/tests/sentry/replays/tasks/test_delete_replays_bulk.py
@@ -94,7 +94,7 @@ def test_run_bulk_replay_delete_job_completion(self, mock_delete_matched_rows, m
{
"retention_days": 90,
"replay_id": "b",
- "max_segment_id": 0,
+ "max_segment_id": None,
"platform": "javascript",
},
],
diff --git a/tests/sentry/replays/test_project_replay_summarize_breadcrumbs.py b/tests/sentry/replays/test_project_replay_summarize_breadcrumbs.py
index f6d83915820..13dd303cf0f 100644
--- a/tests/sentry/replays/test_project_replay_summarize_breadcrumbs.py
+++ b/tests/sentry/replays/test_project_replay_summarize_breadcrumbs.py
@@ -1,18 +1,31 @@
import uuid
import zlib
+from datetime import datetime, timezone
from unittest.mock import patch
+import requests
+from django.conf import settings
from django.urls import reverse
from rest_framework.exceptions import ParseError
-from sentry.replays.endpoints.project_replay_summarize_breadcrumbs import get_request_data
+from sentry import nodestore
+from sentry.eventstore.models import Event
+from sentry.replays.endpoints.project_replay_summarize_breadcrumbs import (
+ ErrorEvent,
+ get_request_data,
+)
from sentry.replays.lib.storage import FilestoreBlob, RecordingSegmentStorageMeta
+from sentry.replays.testutils import mock_replay
from sentry.testutils.cases import TransactionTestCase
+from sentry.testutils.skips import requires_snuba
from sentry.utils import json
# have to use TransactionTestCase because we're using threadpools
-class ProjectReplaySummarizeBreadcrumbsTestCase(TransactionTestCase):
+@requires_snuba
+class ProjectReplaySummarizeBreadcrumbsTestCase(
+ TransactionTestCase,
+):
endpoint = "sentry-api-0-project-replay-summarize-breadcrumbs"
def setUp(self):
@@ -24,6 +37,12 @@ def setUp(self):
args=(self.organization.slug, self.project.slug, self.replay_id),
)
+ def store_replays(self, replay):
+ response = requests.post(
+ settings.SENTRY_SNUBA + "/tests/entities/replays/insert", json=[replay]
+ )
+ assert response.status_code == 200
+
def save_recording_segment(
self, segment_id: int, data: bytes, compressed: bool = True, is_archived: bool = False
) -> None:
@@ -119,6 +138,147 @@ def x(x):
assert response.get("Content-Type") == "application/json"
assert response.json() == {"detail": "e"}
+ @patch("sentry.replays.endpoints.project_replay_summarize_breadcrumbs.make_seer_request")
+ def test_get_with_error(self, make_seer_request):
+ """Test handling of breadcrumbs with error"""
+ return_value = json.dumps({"error": "An error happened"}).encode()
+ make_seer_request.return_value = return_value
+
+ now = datetime.now(timezone.utc)
+ event_id = uuid.uuid4().hex
+ error_timestamp = now.timestamp() - 1
+ self.store_event(
+ data={
+ "event_id": event_id,
+ "timestamp": error_timestamp,
+ "exception": {
+ "values": [
+ {
+ "type": "ZeroDivisionError",
+ "value": "division by zero",
+ }
+ ]
+ },
+ "contexts": {"replay": {"replay_id": self.replay_id}},
+ },
+ project_id=self.project.id,
+ )
+
+ # Ensure the event is stored in nodestore
+ node_id = Event.generate_node_id(self.project.id, event_id)
+ event_data = nodestore.backend.get(node_id)
+ assert event_data is not None, "Event not found in nodestore"
+ assert (
+ event_data.get("exception", {}).get("values", [{}])[0].get("type")
+ == "ZeroDivisionError"
+ )
+
+ self.store_replays(
+ mock_replay(
+ now,
+ self.project.id,
+ self.replay_id,
+ error_ids=[event_id],
+ )
+ )
+
+ data = [
+ {
+ "type": 5,
+ "timestamp": float(now.timestamp()),
+ "data": {
+ "tag": "breadcrumb",
+ "payload": {"category": "console", "message": "hello"},
+ },
+ }
+ ]
+ self.save_recording_segment(0, json.dumps(data).encode())
+
+ with self.feature(
+ {
+ "organizations:session-replay": True,
+ "organizations:replay-ai-summaries": True,
+ "organizations:gen-ai-features": True,
+ }
+ ):
+ response = self.client.get(self.url)
+
+ make_seer_request.assert_called_once()
+ call_args = json.loads(make_seer_request.call_args[0][0])
+ assert "logs" in call_args
+ assert any("ZeroDivisionError" in log for log in call_args["logs"])
+ assert any("division by zero" in log for log in call_args["logs"])
+
+ assert response.status_code == 200
+ assert response.get("Content-Type") == "application/json"
+ assert response.content == return_value
+
+ @patch("sentry.replays.endpoints.project_replay_summarize_breadcrumbs.make_seer_request")
+ def test_get_with_error_context_disabled(self, make_seer_request):
+ """Test handling of breadcrumbs with error context disabled"""
+ return_value = json.dumps({"error": "An error happened"}).encode()
+ make_seer_request.return_value = return_value
+
+ now = datetime.now(timezone.utc)
+ event_id = uuid.uuid4().hex
+ error_timestamp = now.timestamp() - 1
+ self.store_event(
+ data={
+ "event_id": event_id,
+ "timestamp": error_timestamp,
+ "exception": {
+ "values": [
+ {
+ "type": "ZeroDivisionError",
+ "value": "division by zero",
+ }
+ ]
+ },
+ "contexts": {"replay": {"replay_id": self.replay_id}},
+ },
+ project_id=self.project.id,
+ )
+
+ self.store_replays(
+ mock_replay(
+ now,
+ self.project.id,
+ self.replay_id,
+ error_ids=[event_id],
+ )
+ )
+
+ data = [
+ {
+ "type": 5,
+ "timestamp": float(now.timestamp()),
+ "data": {
+ "tag": "breadcrumb",
+ "payload": {"category": "console", "message": "hello"},
+ },
+ }
+ ]
+ self.save_recording_segment(0, json.dumps(data).encode())
+
+ with self.feature(
+ {
+ "organizations:session-replay": True,
+ "organizations:replay-ai-summaries": True,
+ "organizations:gen-ai-features": True,
+ }
+ ):
+ response = self.client.get(self.url, {"enable_error_context": "false"})
+
+ make_seer_request.assert_called_once()
+ call_args = json.loads(make_seer_request.call_args[0][0])
+ assert "logs" in call_args
+ assert not any("ZeroDivisionError" in log for log in call_args["logs"])
+ assert not any("division by zero" in log for log in call_args["logs"])
+
+ assert response.status_code == 200
+ assert response.get("Content-Type") == "application/json"
+ assert response.content == return_value
+
def test_get_request_data():
def _faker():
@@ -127,7 +287,7 @@ def _faker():
[
{
"type": 5,
- "timestamp": 0.0,
+ "timestamp": 1.5,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "hello"},
@@ -135,7 +295,7 @@ def _faker():
},
{
"type": 5,
- "timestamp": 0.0,
+ "timestamp": 2.0,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "world"},
@@ -145,5 +305,27 @@ def _faker():
).encode()
)
- result = get_request_data(_faker())
- assert result == ["Logged: hello at 0.0", "Logged: world at 0.0"]
+ error_events = [
+ ErrorEvent(
+ category="error",
+ id="123",
+ title="ZeroDivisionError",
+ timestamp=3.0,
+ message="division by zero",
+ ),
+ ErrorEvent(
+ category="error",
+ id="234",
+ title="BadError",
+ timestamp=1.0,
+ message="something else bad",
+ ),
+ ]
+
+ result = get_request_data(_faker(), error_events=error_events)
+ assert result == [
+ "User experienced an error: 'BadError: something else bad' at 1.0",
+ "Logged: hello at 1.5",
+ "Logged: world at 2.0",
+ "User experienced an error: 'ZeroDivisionError: division by zero' at 3.0",
+ ]
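
The updated expectation makes the endpoint's merge behavior explicit: breadcrumb log lines and replay-linked error events are interleaved into a single, timestamp-ordered transcript, with errors rendered as "User experienced an error" lines. A minimal sketch of that interleaving, assuming both inputs carry comparable timestamps (illustrative only, not the endpoint's actual implementation):

```python
from heapq import merge
from typing import NamedTuple


class ErrorEvent(NamedTuple):
    # Mirrors the fields the test constructs above.
    category: str
    id: str
    title: str
    timestamp: float
    message: str


def interleave_logs_and_errors(
    logs: list[tuple[float, str]], errors: list[ErrorEvent]
) -> list[str]:
    """Merge pre-sorted log lines with error lines in timestamp order."""
    error_lines = [
        (e.timestamp, f"User experienced an error: '{e.title}: {e.message}' at {e.timestamp}")
        for e in sorted(errors, key=lambda e: e.timestamp)
    ]
    return [line for _, line in merge(logs, error_lines, key=lambda pair: pair[0])]


# interleave_logs_and_errors(
#     [(1.5, "Logged: hello at 1.5"), (2.0, "Logged: world at 2.0")],
#     [ErrorEvent("error", "123", "ZeroDivisionError", 3.0, "division by zero")],
# ) yields the log and error lines in the order asserted above.
```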
diff --git a/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py b/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py
index 68aa310f76f..f6d1ce6cffb 100644
--- a/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py
+++ b/tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py
@@ -241,6 +241,95 @@ def test_update_bad_schema(self):
status_code=400,
)
+ def test_update_owner_to_user(self):
+ # Initially no owner
+ assert self.detector.owner_user_id is None
+ assert self.detector.owner_team_id is None
+
+ data = {
+ **self.valid_data,
+ "owner": self.user.get_actor_identifier(),
+ }
+
+ with self.tasks():
+ response = self.get_success_response(
+ self.organization.slug,
+ self.detector.id,
+ **data,
+ status_code=200,
+ )
+
+ detector = Detector.objects.get(id=response.data["id"])
+
+ # Verify owner is set correctly
+ assert detector.owner_user_id == self.user.id
+ assert detector.owner_team_id is None
+ assert detector.owner is not None
+ assert detector.owner.identifier == self.user.get_actor_identifier()
+
+ # Verify serialized response includes owner
+ assert response.data["owner"] == self.user.get_actor_identifier()
+
+ def test_update_owner_to_team(self):
+ # Set initial user owner
+ self.detector.owner_user_id = self.user.id
+ self.detector.save()
+
+ # Create a team
+ team = self.create_team(organization=self.organization)
+
+ data = {
+ **self.valid_data,
+ "owner": f"team:{team.id}",
+ }
+
+ with self.tasks():
+ response = self.get_success_response(
+ self.organization.slug,
+ self.detector.id,
+ **data,
+ status_code=200,
+ )
+
+ detector = Detector.objects.get(id=response.data["id"])
+
+ # Verify owner changed to team
+ assert detector.owner_user_id is None
+ assert detector.owner_team_id == team.id
+ assert detector.owner is not None
+ assert detector.owner.identifier == f"team:{team.id}"
+
+ # Verify serialized response includes team owner
+ assert response.data["owner"] == f"team:{team.id}"
+
+ def test_update_clear_owner(self):
+ # Set initial owner
+ self.detector.owner_user_id = self.user.id
+ self.detector.save()
+
+ data = {
+ **self.valid_data,
+ "owner": None,
+ }
+
+ with self.tasks():
+ response = self.get_success_response(
+ self.organization.slug,
+ self.detector.id,
+ **data,
+ status_code=200,
+ )
+
+ detector = Detector.objects.get(id=response.data["id"])
+
+ # Verify owner is cleared
+ assert detector.owner_user_id is None
+ assert detector.owner_team_id is None
+ assert detector.owner is None
+
+ # Verify serialized response shows no owner
+ assert response.data["owner"] is None
+
@region_silo_test
class OrganizationDetectorDetailsDeleteTest(OrganizationDetectorDetailsBaseTest):
diff --git a/tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py b/tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py
index 009d6d3bcd7..171e55d9840 100644
--- a/tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py
+++ b/tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py
@@ -454,3 +454,89 @@ def test_empty_query_string(self):
query_sub = QuerySubscription.objects.get(id=int(data_source.source_id))
assert query_sub.snuba_query.query == ""
+
+ def test_valid_creation_with_owner(self):
+ # Test data with owner field
+ data_with_owner = {
+ **self.valid_data,
+ "owner": self.user.get_actor_identifier(),
+ }
+
+ with self.tasks():
+ response = self.get_success_response(
+ self.organization.slug,
+ **data_with_owner,
+ status_code=201,
+ )
+
+ detector = Detector.objects.get(id=response.data["id"])
+
+ # Verify owner is set correctly
+ assert detector.owner_user_id == self.user.id
+ assert detector.owner_team_id is None
+ assert detector.owner is not None
+ assert detector.owner.identifier == self.user.get_actor_identifier()
+
+ # Verify serialized response includes owner
+ assert response.data["owner"] == self.user.get_actor_identifier()
+
+ def test_valid_creation_with_team_owner(self):
+ # Create a team for testing
+ team = self.create_team(organization=self.organization)
+
+ # Test data with team owner
+ data_with_team_owner = {
+ **self.valid_data,
+ "owner": f"team:{team.id}",
+ }
+
+ with self.tasks():
+ response = self.get_success_response(
+ self.organization.slug,
+ **data_with_team_owner,
+ status_code=201,
+ )
+
+ detector = Detector.objects.get(id=response.data["id"])
+
+ # Verify team owner is set correctly
+ assert detector.owner_user_id is None
+ assert detector.owner_team_id == team.id
+ assert detector.owner is not None
+ assert detector.owner.identifier == f"team:{team.id}"
+
+ # Verify serialized response includes team owner
+ assert response.data["owner"] == f"team:{team.id}"
+
+ def test_invalid_owner(self):
+ # Test with invalid owner format
+ data_with_invalid_owner = {
+ **self.valid_data,
+ "owner": "invalid:owner:format",
+ }
+
+ response = self.get_error_response(
+ self.organization.slug,
+ **data_with_invalid_owner,
+ status_code=400,
+ )
+ assert "owner" in response.data
+
+ def test_owner_not_in_organization(self):
+ # Create a user in another organization
+ other_org = self.create_organization()
+ other_user = self.create_user()
+ self.create_member(organization=other_org, user=other_user)
+
+ # Test with owner not in current organization
+ data_with_invalid_owner = {
+ **self.valid_data,
+ "owner": other_user.get_actor_identifier(),
+ }
+
+ response = self.get_error_response(
+ self.organization.slug,
+ **data_with_invalid_owner,
+ status_code=400,
+ )
+ assert "owner" in response.data
diff --git a/tests/sentry/workflow_engine/processors/test_delayed_workflow.py b/tests/sentry/workflow_engine/processors/test_delayed_workflow.py
index 413f056d68e..297e02b38db 100644
--- a/tests/sentry/workflow_engine/processors/test_delayed_workflow.py
+++ b/tests/sentry/workflow_engine/processors/test_delayed_workflow.py
@@ -38,7 +38,10 @@
SLOW_CONDITIONS,
Condition,
)
-from sentry.workflow_engine.processors.data_condition_group import ProcessedDataConditionGroup
+from sentry.workflow_engine.processors.data_condition_group import (
+ ProcessedDataConditionGroup,
+ get_slow_conditions_for_groups,
+)
from sentry.workflow_engine.processors.delayed_workflow import (
EventInstance,
EventKey,
@@ -403,7 +406,10 @@ def test_get_condition_query_groups(self):
mock_event_data.dcg_to_groups = dcg_to_groups
mock_event_data.dcg_to_workflow = dcg_to_workflow
- result = get_condition_query_groups(dcgs, mock_event_data, workflows_to_envs)
+ dcg_to_slow_conditions = get_slow_conditions_for_groups(list(dcg_to_groups.keys()))
+ result = get_condition_query_groups(
+ dcgs, mock_event_data, workflows_to_envs, dcg_to_slow_conditions
+ )
count_query = generate_unique_queries(self.count_dc, None)[0]
percent_only_query = generate_unique_queries(self.percent_dc, None)[1]
@@ -611,12 +617,15 @@ def setUp(self):
}
)
+ self.dcg_to_slow_conditions = get_slow_conditions_for_groups(list(self.event_data.dcg_ids))
+
def test_simple(self):
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
+ self.dcg_to_slow_conditions,
)
assert result == {
@@ -640,6 +649,7 @@ def test_dcg_all_fails(self):
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
+ self.dcg_to_slow_conditions,
)
assert result == {
@@ -661,6 +671,7 @@ def test_dcg_any_fails(self):
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
+ self.dcg_to_slow_conditions,
)
assert result == {
@@ -691,6 +702,7 @@ def test_multiple_dcgs_per_group(self):
self.workflows_to_envs,
event_data,
self.condition_group_results,
+ self.dcg_to_slow_conditions,
)
assert result == {
self.group1.id: set(self.workflow1_dcgs + [self.workflow2_dcgs[0]]),
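
The thread through these changes is a signature change: `get_slow_conditions_for_groups` is now called once per batch to build a DCG-id to slow-conditions mapping, and that mapping is handed to both `get_condition_query_groups` and `get_groups_to_fire` instead of each callee looking conditions up per group. A sketch of the shape of that precomputation, with the fetch and slow-condition predicates as stand-ins for the real workflow_engine helpers:

```python
from collections import defaultdict
from typing import Callable, Iterable


def batch_slow_conditions(
    dcg_ids: list[int],
    fetch_conditions: Callable[[list[int]], Iterable[tuple[int, object]]],
    is_slow: Callable[[object], bool],
) -> dict[int, list[object]]:
    """Build the dcg_id -> slow conditions map once, from a single fetch."""
    dcg_to_slow: dict[int, list[object]] = defaultdict(list)
    for dcg_id, condition in fetch_conditions(dcg_ids):
        if is_slow(condition):
            dcg_to_slow[dcg_id].append(condition)
    # Give every requested DCG an entry so callers can index without guards.
    return {dcg_id: dcg_to_slow.get(dcg_id, []) for dcg_id in dcg_ids}
```

Passing the mapping into both consumers keeps the two call sites consistent and avoids recomputing slow-condition lookups inside the fire-decision loop.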
diff --git a/tests/snuba/api/endpoints/test_organization_events_stats.py b/tests/snuba/api/endpoints/test_organization_events_stats.py
index 9a1ea94fc4e..71c12586339 100644
--- a/tests/snuba/api/endpoints/test_organization_events_stats.py
+++ b/tests/snuba/api/endpoints/test_organization_events_stats.py
@@ -1,8 +1,9 @@
from __future__ import annotations
import uuid
+from collections import defaultdict
from datetime import datetime, timedelta
-from typing import Any, TypedDict
+from typing import Any, DefaultDict, TypedDict
from unittest import mock
from uuid import uuid4
@@ -18,7 +19,7 @@
from sentry.models.project import Project
from sentry.models.transaction_threshold import ProjectTransactionThreshold, TransactionMetric
from sentry.snuba.discover import OTHER_KEY
-from sentry.testutils.cases import APITestCase, ProfilesSnubaTestCase, SnubaTestCase
+from sentry.testutils.cases import APITestCase, OurLogTestCase, ProfilesSnubaTestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now
from sentry.utils.samples import load_data
from tests.sentry.issues.test_utils import SearchIssueTestMixin
@@ -1246,7 +1247,7 @@ def test_group_id_tag_simple(self):
assert all([interval[1][0]["count"] == 0 for interval in response.data["data"]])
-class OrganizationEventsStatsTopNEvents(APITestCase, SnubaTestCase):
+class OrganizationEventsStatsTopNEventsSpans(APITestCase, SnubaTestCase):
def setUp(self):
super().setUp()
self.login_as(user=self.user)
@@ -3082,6 +3083,112 @@ def test_functions_dataset_simple(self):
}
+class OrganizationEventsStatsTopNEventsLogs(APITestCase, SnubaTestCase, OurLogTestCase):
+ # This is implemented almost exactly the same way as spans, so we add a single test case as a sanity check
+ def setUp(self):
+ super().setUp()
+ self.login_as(user=self.user)
+
+ self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
+
+ self.project = self.create_project()
+ self.logs = (
+ [
+ self.create_ourlog(
+ {"body": "zero seconds"},
+ timestamp=self.day_ago + timedelta(microseconds=i),
+ )
+ for i in range(10)
+ ]
+ + [
+ self.create_ourlog(
+ {"body": "five seconds"},
+ timestamp=self.day_ago + timedelta(seconds=5, microseconds=i),
+ )
+ for i in range(20)
+ ]
+ + [
+ self.create_ourlog(
+ {"body": "ten seconds"},
+ timestamp=self.day_ago + timedelta(seconds=10, microseconds=i),
+ )
+ for i in range(30)
+ ]
+ + [
+ self.create_ourlog(
+ {"body": "fifteen seconds"},
+ timestamp=self.day_ago + timedelta(seconds=15, microseconds=i),
+ )
+ for i in range(40)
+ ]
+ + [
+ self.create_ourlog(
+ {"body": "twenty seconds"},
+ timestamp=self.day_ago + timedelta(seconds=20, microseconds=i),
+ )
+ for i in range(50)
+ ]
+ + [
+ self.create_ourlog(
+ {"body": "twenty five seconds"},
+ timestamp=self.day_ago + timedelta(seconds=25, microseconds=i),
+ )
+ for i in range(60)
+ ]
+ )
+ self.store_ourlogs(self.logs)
+
+ self.enabled_features = {
+ "organizations:discover-basic": True,
+ "organizations:ourlogs-enabled": True,
+ }
+ self.url = reverse(
+ "sentry-api-0-organization-events-stats",
+ kwargs={"organization_id_or_slug": self.project.organization.slug},
+ )
+
+ def test_simple_top_events(self):
+ with self.feature(self.enabled_features):
+ response = self.client.get(
+ self.url,
+ data={
+ "start": self.day_ago.isoformat(),
+ "end": (self.day_ago + timedelta(hours=2)).isoformat(),
+ "dataset": "ourlogs",
+ "interval": "1h",
+ "yAxis": "count()",
+ "orderby": ["-count()"],
+ "field": ["count()", "message"],
+ "topEvents": "5",
+ },
+ format="json",
+ )
+
+ data = response.data
+ assert response.status_code == 200, response.content
+
+ expected_message_counts_dict: DefaultDict[str, int] = defaultdict(int)
+ for log in self.logs:
+ attr = log.attributes.get("sentry.body")
+ if attr is not None:
+ body = attr.string_value
+ expected_message_counts_dict[body] += 1
+
+ expected_message_counts: list[tuple[str, int]] = sorted(
+ expected_message_counts_dict.items(), key=lambda x: x[1], reverse=True
+ )
+
+ assert set(data.keys()) == {x[0] for x in expected_message_counts[:5]}.union({"Other"})
+
+ for index, (message, count) in enumerate(expected_message_counts[:5]):
+ assert [{"count": count}] in data[message]["data"][0]
+ assert data[message]["order"] == index
+
+ other = data["Other"]
+ assert other["order"] == 5
+ assert [{"count": 10}] in other["data"][0]
+
+
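
A quick check of the expected numbers in `test_simple_top_events`: the fixture stores 10, 20, 30, 40, 50, and 60 logs across the six message bodies, the top five series are the bodies with 60 down through 20 events, and the remaining 10 "zero seconds" logs land in the synthetic "Other" series:

```python
counts = {"zero": 10, "five": 20, "ten": 30, "fifteen": 40, "twenty": 50, "twenty five": 60}
top_five = sorted(counts.values(), reverse=True)[:5]  # [60, 50, 40, 30, 20]
other = sum(counts.values()) - sum(top_five)          # 10, matching the assertion above
```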
class OrganizationEventsStatsTopNEventsErrors(APITestCase, SnubaTestCase):
def setUp(self):
super().setUp()