import pytest
import responses
- from django.conf import settings

from sentry.feedback.lib.utils import FeedbackCreationSource
from sentry.feedback.usecases.ingest.create_feedback import (
@@ -19,6 +18,7 @@
    MAX_AI_LABELS,
    MAX_AI_LABELS_JSON_LENGTH,
)
+ from sentry.feedback.usecases.title_generation import SEER_GENERATE_TITLE_URL
from sentry.models.group import Group, GroupStatus
from sentry.signals import first_feedback_received, first_new_feedback_received
from sentry.testutils.helpers import Feature
@@ -28,11 +28,11 @@
from tests.sentry.feedback import create_dummy_openai_response, mock_feedback_event


- def mock_seer_response(**kwargs) -> None:
-     """Use with @responses.activate to mock Seer API responses."""
+ def mock_seer_title_response(**kwargs) -> None:
+     """Use with @responses.activate to mock the Seer title generation response."""
    responses.add(
        responses.POST,
-         f"{settings.SEER_AUTOFIX_URL}/v1/automation/summarize/feedback/title",
+         SEER_GENERATE_TITLE_URL,
        **kwargs,
    )

@@ -944,7 +944,7 @@ def test_create_feedback_issue_title_from_seer(
        event = mock_feedback_event(default_project.id)
        event["contexts"]["feedback"]["message"] = "The login button is broken and the UI is slow"

-         mock_seer_response(
+         mock_seer_title_response(
            status=200,
            body='{"title": "Login Button Issue"}',
        )
@@ -970,7 +970,7 @@ def test_create_feedback_issue_title_from_seer_fallback(
        event = mock_feedback_event(default_project.id)
        event["contexts"]["feedback"]["message"] = "The login button is broken and the UI is slow"

-         mock_seer_response(body=Exception("Network Error"))
+         mock_seer_title_response(body=Exception("Network Error"))
        create_feedback_issue(event, default_project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE)

        assert mock_produce_occurrence_to_kafka.call_count == 1
@@ -980,6 +980,33 @@ def test_create_feedback_issue_title_from_seer_fallback(
        )


+ @django_db_all
+ @responses.activate
+ def test_create_feedback_issue_title_from_seer_skips_if_spam(
+     default_project,
+     mock_produce_occurrence_to_kafka,
+ ) -> None:
+     """Test that the title generation endpoint is not called if the feedback is marked as spam."""
+     with (
+         patch("sentry.feedback.usecases.ingest.create_feedback.is_spam", return_value=True),
+         # XXX: this is not ideal to mock; we should refactor the spam and AI processors into their own unit-testable functions.
+         patch(
+             "sentry.feedback.usecases.ingest.create_feedback.spam_detection_enabled",
+             return_value=True,
+         ),
+         Feature(
+             {
+                 "organizations:gen-ai-features": True,
+                 "organizations:user-feedback-ai-titles": True,
+             }
+         ),
+     ):
+         event = mock_feedback_event(default_project.id)
+         create_feedback_issue(event, default_project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE)
+         urls = [call.request.url for call in responses.calls]
+         assert SEER_GENERATE_TITLE_URL not in urls
+
+
@django_db_all
@responses.activate
def test_create_feedback_issue_title_from_seer_none(
@@ -995,7 +1022,7 @@ def test_create_feedback_issue_title_from_seer_none(
        event = mock_feedback_event(default_project.id)
        event["contexts"]["feedback"]["message"] = "The login button is broken and the UI is slow"

-         mock_seer_response(
+         mock_seer_title_response(
            status=200,
            body='{"title": ""}',
        )
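
For context on the URL swap above: judging from the f-string removed from the old helper, the SEER_GENERATE_TITLE_URL constant in sentry.feedback.usecases.title_generation is presumably built roughly as sketched below. This is an assumption inferred from this diff, not the module's actual source.

# Hypothetical sketch of sentry/feedback/usecases/title_generation.py (assumption,
# reconstructed from the f-string removed in the helper above; the real module may differ).
from django.conf import settings

SEER_GENERATE_TITLE_URL = f"{settings.SEER_AUTOFIX_URL}/v1/automation/summarize/feedback/title"

Centralizing the URL in title_generation lets these tests mock the exact endpoint the production code calls, instead of re-deriving it from settings inside the test helper.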