@@ -11,18 +11,7 @@ def _get_rule(title):
     return rule_id[0]
 
 def test_scorecards():
-    # Retry scorecard create in case there's an active evaluation
-    # (can happen if test_import.py just triggered an evaluation)
-    max_retries = 3
-    for attempt in range(max_retries):
-        try:
-            cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])
-            break
-        except Exception as e:
-            if "500" in str(e) and attempt < max_retries - 1:
-                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s
-                continue
-            raise
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])
 
     response = cli(["scorecards", "list"])
     assert any(scorecard['tag'] == 'cli-test-scorecard' for scorecard in response['scorecards']), "Should find scorecard with tag cli-test-scorecard"
@@ -39,33 +28,30 @@ def test_scorecards():
     # cannot rely on a scorecard evaluation being complete, so not performing any validation
     cli(["scorecards", "next-steps", "-s", "cli-test-scorecard", "-t", "cli-test-service"])
 
-    # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
-    response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
-    assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
-        "Should receive success message or 409 Already evaluating error"
-
     # cannot rely on a scorecard evaluation being complete, so not performing any validation
     #response = cli(["scorecards", "scores", "-s", "cli-test-scorecard", "-t", "cli-test-service"])
     #assert response['scorecardTag'] == "cli-test-scorecard", "Should get valid response that include cli-test-scorecard"
-
+
 # # Not sure if we can run this cli right away. Newly-created Scorecard might not be evaluated yet.
 # # 2024-05-06, additionally now blocked by CET-8882
 # # cli(["scorecards", "scores", "-t", "cli-test-scorecard", "-e", "cli-test-service"])
 #
 # cli(["scorecards", "scores", "-t", "cli-test-scorecard"])
-
+
+def test_scorecard_trigger_evaluation():
+    # Create a dedicated scorecard for trigger-evaluation testing to avoid conflicts with import
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-evaluation-scorecard.yaml"])
+
+    # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
+    response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-evaluation-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
+    assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
+        "Should receive success message or 409 Already evaluating error"
+
+    # Clean up
+    cli(["scorecards", "delete", "-s", "cli-test-evaluation-scorecard"])
+
 def test_scorecards_drafts():
-    # Retry scorecard create in case there's an active evaluation
-    max_retries = 3
-    for attempt in range(max_retries):
-        try:
-            cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])
-            break
-        except Exception as e:
-            if "500" in str(e) and attempt < max_retries - 1:
-                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s
-                continue
-            raise
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])
 
     response = cli(["scorecards", "list", "-s"])
     assert any(scorecard['tag'] == 'cli-test-draft-scorecard' for scorecard in response['scorecards'])