
Commit 44b71e7

Update test_runner.py

1 parent c3c6ef3
File tree: 1 file changed, +24 -15 lines

codeflash/verification/test_runner.py

Lines changed: 24 additions & 15 deletions
@@ -30,6 +30,7 @@ def run_behavioral_tests(
     test_framework: str,
     test_env: dict[str, str],
     cwd: Path,
+    *,
     pytest_timeout: int | None = None,
     pytest_cmd: str = "pytest",
     verbose: bool = False,
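A note on the pattern this hunk adopts: a bare `*` in a signature makes every parameter after it keyword-only. A minimal sketch of the effect, using a hypothetical function name rather than anything from this file:

def run(cmd: str, *, timeout: int | None = None, verbose: bool = False) -> None:
    # Parameters after the bare "*" can only be passed by keyword.
    print(cmd, timeout, verbose)

run("pytest", timeout=60)  # OK
run("pytest", 60)          # TypeError: run() takes 1 positional argument but 2 were given

This protects call sites from silently binding a positional argument to the wrong flag as the signature grows.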
@@ -59,7 +60,7 @@ def run_behavioral_tests(
         "--codeflash_loops_scope=session",
         "--codeflash_min_loops=1",
         "--codeflash_max_loops=1",
-        f"--codeflash_seconds={pytest_target_runtime_seconds}",  # TODO :This is unnecessary, update the plugin to not ask for this
+        f"--codeflash_seconds={pytest_target_runtime_seconds}",  # TODO : This is unnecessary, update the plugin to not ask for this
     ]
 
     result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
@@ -77,18 +78,16 @@ def run_behavioral_tests(
             # then the current run will be appended to the previous data, which skews the results
             logger.debug(cov_erase)
 
+            coverage_cmd = f"{SAFE_SYS_EXECUTABLE} -m coverage run --rcfile={coveragercfile.as_posix()} -m"
             results = execute_test_subprocess(
-                shlex.split(f"{SAFE_SYS_EXECUTABLE} -m coverage run --rcfile={coveragercfile.as_posix()} -m")
-                + pytest_cmd_list
-                + common_pytest_args
-                + result_args
-                + test_files,
+                shlex.split(coverage_cmd) + pytest_cmd_list + common_pytest_args + result_args + test_files,
                 cwd=cwd,
                 env=pytest_test_env,
                 timeout=600,
             )
             logger.debug(
-                f"""Result return code: {results.returncode}, {"Result stderr:" + str(results.stderr) if results.stderr else ""}"""
+                f"Result return code: {results.returncode}"
+                f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
             )
         else:
             results = execute_test_subprocess(
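Pulling the command string into `coverage_cmd` before splitting keeps the argv construction on one line. `shlex.split` turns a shell-style string into a list, so its result concatenates directly with the other list-valued arguments. A standalone sketch, using `sys.executable` in place of the repo's `SAFE_SYS_EXECUTABLE` and a made-up rcfile path:

import shlex
import sys

coverage_cmd = f"{sys.executable} -m coverage run --rcfile=.coveragerc -m"
argv = shlex.split(coverage_cmd) + ["pytest"] + ["-q"] + ["tests/test_example.py"]
# e.g. ['/usr/bin/python3', '-m', 'coverage', 'run', '--rcfile=.coveragerc', '-m',
#       'pytest', '-q', 'tests/test_example.py']
print(argv)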
@@ -98,19 +97,23 @@ def run_behavioral_tests(
                 timeout=600,  # TODO: Make this dynamic
             )
             logger.debug(
-                f"""Result return code: {results.returncode}, {"Result stderr:" + str(results.stderr) if results.stderr else ""}"""
+                f"Result return code: {results.returncode}"
+                f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
             )
     elif test_framework == "unittest":
         if enable_coverage:
-            raise ValueError("Coverage is not supported yet for unittest framework")
+            msg = "Coverage is not supported yet for unittest framework"
+            raise ValueError(msg)
         test_env["CODEFLASH_LOOP_INDEX"] = "1"
         test_files = [file.instrumented_behavior_file_path for file in test_paths.test_files]
         result_file_path, results = run_unittest_tests(verbose, test_files, test_env, cwd)
         logger.debug(
-            f"""Result return code: {results.returncode}, {"Result stderr:" + str(results.stderr) if results.stderr else ""}"""
+            f"Result return code: {results.returncode}"
+            f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
         )
     else:
-        raise ValueError(f"Unsupported test framework: {test_framework}")
+        msg = f"Unsupported test framework: {test_framework}"
+        raise ValueError(msg)
 
     return result_file_path, results, coverage_database_file if enable_coverage else None
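Assigning the message to a variable before raising matches the style that linters such as Ruff encourage (rules EM101/TRY003, if that is what drives this change): the traceback then shows `raise ValueError(msg)` instead of repeating a long literal. A minimal sketch:

def check_framework(test_framework: str) -> None:
    if test_framework not in ("pytest", "unittest"):
        msg = f"Unsupported test framework: {test_framework}"
        raise ValueError(msg)

check_framework("pytest")  # returns silently
check_framework("nose")    # ValueError: Unsupported test framework: nose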

@@ -121,12 +124,13 @@ def run_benchmarking_tests(
     test_env: dict[str, str],
     cwd: Path,
     test_framework: str,
+    *,
     pytest_target_runtime_seconds: float = TOTAL_LOOPING_TIME,
     verbose: bool = False,
     pytest_timeout: int | None = None,
     pytest_min_loops: int = 5,
     pytest_max_loops: int = 100_000,
-):
+) -> tuple[Path, subprocess.CompletedProcess]:
     if test_framework == "pytest":
         pytest_cmd_list = shlex.split(pytest_cmd, posix=IS_POSIX)
         test_files: list[str] = []
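The new `-> tuple[Path, subprocess.CompletedProcess]` annotation presumes `subprocess` is imported in this module and, below Python 3.9, that `from __future__ import annotations` is in effect for the builtin-generic `tuple[...]` syntax. A self-contained sketch of the annotated shape, with a hypothetical stub in place of the real runner:

from __future__ import annotations

import subprocess
from pathlib import Path

def run_tests_stub(cmd: list[str]) -> tuple[Path, subprocess.CompletedProcess]:
    # Hypothetical stand-in: return the results file alongside the finished process.
    proc = subprocess.run(cmd, capture_output=True, check=False)
    return Path("unittest_results.xml"), proc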
@@ -165,13 +169,18 @@ def run_benchmarking_tests(
         )
     elif test_framework == "unittest":
         test_files = [file.benchmarking_file_path for file in test_paths.test_files]
-        result_file_path, results = run_unittest_tests(verbose, test_files, test_env, cwd)
+        result_file_path, results = run_unittest_tests(
+            verbose=verbose, test_file_paths=test_files, test_env=test_env, cwd=cwd
+        )
     else:
-        raise ValueError(f"Unsupported test framework: {test_framework}")
+        msg = f"Unsupported test framework: {test_framework}"
+        raise ValueError(msg)
     return result_file_path, results
 
 
-def run_unittest_tests(verbose: bool, test_file_paths: list[Path], test_env: dict[str, str], cwd: Path):
+def run_unittest_tests(
+    *, verbose: bool, test_file_paths: list[Path], test_env: dict[str, str], cwd: Path
+) -> tuple[Path, subprocess.CompletedProcess]:
     result_file_path = get_run_tmp_file(Path("unittest_results.xml"))
     unittest_cmd_list = [SAFE_SYS_EXECUTABLE, "-m", "xmlrunner"]
     log_level = ["-v"] if verbose else []
