|
22 | 22 | from tested.serialisation import FunctionCall, FunctionType |
23 | 23 | from tested.testsuite import ( |
24 | 24 | Context, |
| 25 | + FileOutputChannel, |
25 | 26 | MainInput, |
26 | 27 | Output, |
27 | 28 | Suite, |
28 | 29 | SupportedLanguage, |
29 | 30 | Tab, |
30 | 31 | Testcase, |
| 32 | + TextData, |
31 | 33 | ) |
32 | 34 | from tests.manual_utils import assert_valid_output, configuration |
33 | 35 |
|
@@ -196,3 +198,69 @@ def test_mid_tab_is_completed(tmp_path: Path, pytestconfig: pytest.Config): |
196 | 198 | "wrong", |
197 | 199 | "wrong", |
198 | 200 | ] |
| 201 | + |
| 202 | + |
# Minimal suite with one testcase whose output channel expects two files,
# so termination/completion logic must report each file separately.
FILE_OUTPUT_SUITE = Suite(
    tabs=[
        Tab(
            name="Tab 1",
            contexts=[
                Context(
                    testcases=[
                        Testcase(
                            input=MainInput(arguments=["hello 1"]),
                            output=Output(
                                file=FileOutputChannel(
                                    files=[
                                        # Two parallel expected files: out1.txt/out2.txt.
                                        TextData(
                                            path=f"out{i}.txt", content=f"expected{i}"
                                        )
                                        for i in (1, 2)
                                    ]
                                )
                            ),
                        )
                    ]
                )
            ],
        )
    ]
)
| 227 | + |
| 228 | + |
def test_complete_evaluation_with_file_output(
    tmp_path: Path, pytestconfig: pytest.Config
):
    """Test that complete_evaluation emits per-file NOT_EXECUTED tests."""
    config = configuration(pytestconfig, "", SupportedLanguage.JAVASCRIPT, tmp_path)
    output_buffer = StringIO()
    bundle = create_bundle(config, output_buffer, FILE_OUTPUT_SUITE)
    manager = OutputManager(out=output_buffer)

    # Start a judgement without executing any testcase, then force-close it
    # so the completion logic has to fill in the missing results.
    manager.add(StartJudgement())
    terminate(bundle, manager, status_if_unclosed=Status.RUNTIME_ERROR)

    updates = assert_valid_output(output_buffer.getvalue(), pytestconfig)

    # One emitted test per expected file, channelled by its file name.
    started_tests = updates.find_all("start-test")
    assert len(started_tests) == 2
    assert [test["channel"] for test in started_tests] == ["out1.txt", "out2.txt"]

    assert updates.find_status_enum() == ["runtime error", "wrong", "wrong"]
| 256 | + |
| 257 | + |
def test_file_output_channel_rejects_none_path():
    """Test that FileOutputChannel rejects files with path=None."""
    # A single entry without a path must invalidate the whole channel,
    # even when the other entries are well-formed.
    files_with_missing_path = [
        TextData(path=None, content="expected1"),
        TextData(path="out2.txt", content="expected2"),
    ]
    with pytest.raises(ValueError, match="File path must be set"):
        FileOutputChannel(files=files_with_missing_path)
0 commit comments