@@ -50,9 +50,12 @@ def run_behavioral_tests(
                 )
             else:
                 test_files.append(str(file.instrumented_behavior_file_path))
+        pytest_cmd_list = (
+            shlex.split(f"{SAFE_SYS_EXECUTABLE} -m pytest", posix=IS_POSIX)
+            if pytest_cmd == "pytest"
+            else [SAFE_SYS_EXECUTABLE, "-m", *shlex.split(pytest_cmd, posix=IS_POSIX)]
+        )
         test_files = list(set(test_files))  # remove multiple calls in the same test function
-        pytest_cmd_list = shlex.split(pytest_cmd, posix=IS_POSIX)
-
         common_pytest_args = [
             "--capture=tee-sys",
             f"--timeout={pytest_timeout}",
@@ -77,17 +80,19 @@ def run_behavioral_tests(
             )  # this cleanup is necessary to avoid coverage data from previous runs, if there are any,
             # then the current run will be appended to the previous data, which skews the results
             logger.debug(cov_erase)
+            coverage_cmd = [SAFE_SYS_EXECUTABLE, "-m", "coverage", "run", f"--rcfile={coveragercfile.as_posix()}", "-m"]
+
+            if pytest_cmd == "pytest":
+                coverage_cmd.extend(["pytest"])
+            else:
+                coverage_cmd.extend(shlex.split(pytest_cmd, posix=IS_POSIX)[1:])
 
-            coverage_cmd = f"{SAFE_SYS_EXECUTABLE} -m coverage run --rcfile={coveragercfile.as_posix()} -m"
             results = execute_test_subprocess(
-                shlex.split(coverage_cmd) + pytest_cmd_list + common_pytest_args + result_args + test_files,
-                cwd=cwd,
-                env=pytest_test_env,
-                timeout=600,
+                coverage_cmd + common_pytest_args + result_args + test_files, cwd=cwd, env=pytest_test_env, timeout=600
             )
             logger.debug(
-                f"Result return code: {results.returncode}"
-                + (f", Result stderr: {results.stderr}" if results.stderr else "")
+                f"Result return code: {results.returncode}, "
+                f"{' Result stderr:' + str(results.stderr) if results.stderr else ''}"
             )
         else:
             results = execute_test_subprocess(
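Building coverage_cmd as a list rather than re-splitting a formatted string keeps each argument a discrete argv entry. A sketch of the argv this produces in the default case, with a made-up rcfile path and SAFE_SYS_EXECUTABLE stubbed as before:

    # Illustrative only: the argv assembled by the coverage branch above.
    import sys
    from pathlib import Path

    SAFE_SYS_EXECUTABLE = sys.executable  # stand-in for the real constant
    coveragercfile = Path(".coveragerc")  # hypothetical path for illustration

    coverage_cmd = [SAFE_SYS_EXECUTABLE, "-m", "coverage", "run",
                    f"--rcfile={coveragercfile.as_posix()}", "-m"]
    coverage_cmd.extend(["pytest"])  # default branch; a custom pytest_cmd instead
                                     # contributes shlex.split(pytest_cmd, ...)[1:]
    # coverage_cmd -> ['<python>', '-m', 'coverage', 'run', '--rcfile=.coveragerc', '-m', 'pytest']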
@@ -97,8 +102,7 @@ def run_behavioral_tests(
                 timeout=600,  # TODO: Make this dynamic
             )
             logger.debug(
-                f"Result return code: {results.returncode}"
-                + (f", Result stderr: {results.stderr}" if results.stderr else "")
+                f"""Result return code: {results.returncode}, {"Result stderr:" + str(results.stderr) if results.stderr else ""}"""
             )
     elif test_framework == "unittest":
         if enable_coverage:
@@ -110,8 +114,7 @@ def run_behavioral_tests(
             verbose=verbose, test_file_paths=test_files, test_env=test_env, cwd=cwd
         )
         logger.debug(
-            f"Result return code: {results.returncode}"
-            + (f", Result stderr: {results.stderr}" if results.stderr else "")
+            f"""Result return code: {results.returncode}, {"Result stderr:" + str(results.stderr) if results.stderr else ""}"""
         )
     else:
         msg = f"Unsupported test framework: {test_framework}"
@@ -134,7 +137,11 @@ def run_benchmarking_tests(
     pytest_max_loops: int = 100_000,
 ) -> tuple[Path, subprocess.CompletedProcess]:
     if test_framework == "pytest":
-        pytest_cmd_list = shlex.split(pytest_cmd, posix=IS_POSIX)
+        pytest_cmd_list = (
+            shlex.split(f"{SAFE_SYS_EXECUTABLE} -m pytest", posix=IS_POSIX)
+            if pytest_cmd == "pytest"
+            else shlex.split(pytest_cmd)
+        )
         test_files: list[str] = []
         for file in test_paths.test_files:
             if file.test_type in [TestType.REPLAY_TEST, TestType.EXISTING_UNIT_TEST] and file.tests_in_file:
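run_benchmarking_tests picks up the same default; note its fallback splits pytest_cmd as-is rather than prefixing the interpreter. For context, a hypothetical stand-in for execute_test_subprocess, assuming it is a thin subprocess.run wrapper (the diff only shows its call sites, so the real helper may differ):

    # Hypothetical stand-in, assuming a thin subprocess.run wrapper that
    # captures output; the actual implementation lives elsewhere in the repo.
    import subprocess

    def execute_test_subprocess(cmd_list: list[str], cwd, env, timeout: int = 600):
        # Capture stdout/stderr so callers can log results.returncode and
        # results.stderr, matching the logger.debug calls in the hunks above.
        return subprocess.run(
            cmd_list, cwd=cwd, env=env, timeout=timeout,
            capture_output=True, text=True, check=False,
        )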