Skip to content

Commit 3b0db87

Browse files
authored
Merge branch 'main' into compile
2 parents 7b0c8d9 + d53e6d2 commit 3b0db87

File tree

3 files changed

+21
-1
lines changed

3 files changed

+21
-1
lines changed

codeflash/result/explanation.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,15 @@ class Explanation:
3333

3434
@property
def perf_improvement_line(self) -> str:
    """Return a one-line, human-readable summary of the measured improvement.

    When the optimization was accepted for a specific reason (runtime,
    throughput, or concurrency), that label is woven into the sentence;
    otherwise the generic phrasing without a label is used.
    """
    # Map each acceptance reason to the label used in the summary line.
    # Unknown reasons (and NONE) resolve to an empty label, which selects
    # the generic phrasing below.
    labels = {
        AcceptanceReason.RUNTIME: "runtime",
        AcceptanceReason.THROUGHPUT: "throughput",
        AcceptanceReason.CONCURRENCY: "concurrency",
        AcceptanceReason.NONE: "",
    }
    label = labels.get(self.acceptance_reason, "")
    if not label:
        return f"{self.speedup_pct} improvement ({self.speedup_x} faster)."
    return f"{self.speedup_pct} {label} improvement ({self.speedup_x} faster)."
3746

3847
@property

tests/scripts/end_to_end_test_async.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ def run_test(expected_improvement_pct: int) -> bool:
88
config = TestConfig(
99
file_path="main.py",
1010
min_improvement_x=0.1,
11+
expected_acceptance_reason="concurrency",
1112
coverage_expectations=[
1213
CoverageExpectation(
1314
function_name="retry_with_backoff",

tests/scripts/end_to_end_test_utilities.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ class TestConfig:
3737
benchmarks_root: Optional[pathlib.Path] = None
3838
use_worktree: bool = False
3939
no_gen_tests: bool = False
40+
expected_acceptance_reason: Optional[str] = None # "runtime", "throughput", "concurrency"
4041

4142

4243
def clear_directory(directory_path: str | pathlib.Path) -> None:
@@ -176,7 +177,7 @@ def validate_output(stdout: str, return_code: int, expected_improvement_pct: int
176177
logging.error("Failed to find performance improvement message")
177178
return False
178179

179-
improvement_match = re.search(r"📈 ([\d,]+)% improvement", stdout)
180+
improvement_match = re.search(r"📈 ([\d,]+)% (?:(\w+) )?improvement", stdout)
180181
if not improvement_match:
181182
logging.error("Could not find improvement percentage in output")
182183
return False
@@ -193,6 +194,15 @@ def validate_output(stdout: str, return_code: int, expected_improvement_pct: int
193194
logging.error(f"Performance improvement rate {improvement_x}x not above {config.min_improvement_x}x")
194195
return False
195196

197+
if config.expected_acceptance_reason is not None:
198+
actual_reason = improvement_match.group(2)
199+
if not actual_reason:
200+
logging.error("Could not find acceptance reason type in output")
201+
return False
202+
if actual_reason != config.expected_acceptance_reason:
203+
logging.error(f"Expected acceptance reason '{config.expected_acceptance_reason}', got '{actual_reason}'")
204+
return False
205+
196206
if config.expected_unit_tests_count is not None:
197207
# Match the global test discovery message from optimizer.py which counts test invocations
198208
# Format: "Discovered X existing unit tests and Y replay tests in Z.Zs at /path/to/tests"

0 commit comments

Comments
 (0)