diff --git a/tested/dsl/schema-strict.json b/tested/dsl/schema-strict.json
index 8c252b597..b2812f81f 100644
--- a/tested/dsl/schema-strict.json
+++ b/tested/dsl/schema-strict.json
@@ -343,6 +343,7 @@
]
},
"files" : {
+ "description" : "Deprecated: use input_files instead.",
"type" : "array",
"items" : {
"$ref" : "#/definitions/file"
@@ -367,6 +368,10 @@
"$ref" : "#/definitions/textOutputChannel"
},
"file": {
+ "description" : "Deprecated: use output_files instead.",
+ "$ref" : "#/definitions/fileOutputChannel"
+ },
+ "output_files": {
"description" : "Expected files generated by the submission.",
"$ref" : "#/definitions/fileOutputChannel"
},
@@ -611,6 +616,35 @@
}
}
},
+ {
+ "type" : "object",
+ "description" : "Built-in oracle for files.",
+ "required" : [
+ "data"
+ ],
+ "properties" : {
+ "data" : {
+ "type" : "array",
+ "description" : "Files to expect.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
+ "oracle" : {
+ "const" : "builtin"
+ },
+ "config" : {
+ "$ref" : "#/definitions/fileConfigurationOptions"
+ }
+ }
+ },
+ {
+ "type" : "array",
+ "description" : "Built-in oracle for files.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
{
"type" : "object",
"description" : "Custom oracle for file values.",
@@ -656,6 +690,49 @@
}
}
}
+ },
+ {
+ "type" : "object",
+ "description" : "Custom oracle for file values.",
+ "required" : [
+ "oracle",
+ "data"
+ ],
+ "properties" : {
+ "oracle" : {
+ "const" : "custom_check"
+ },
+ "data" : {
+ "type" : "array",
+ "description" : "Files to expect.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
+ "file" : {
+ "type" : "string",
+ "description" : "The path to the file containing the custom check function."
+ },
+ "name" : {
+ "type" : "string",
+ "description" : "The name of the custom check function.",
+ "default" : "evaluate"
+ },
+ "arguments" : {
+ "type" : "array",
+ "description" : "List of YAML (or tagged expression) values to use as arguments to the function.",
+ "items" : {
+ "$ref" : "#/definitions/yamlValueOrPythonExpression"
+ }
+ },
+ "languages": {
+ "type" : "array",
+ "description" : "Which programming languages are supported by this oracle.",
+ "items" : {
+ "$ref" : "#/definitions/programmingLanguage"
+ }
+ }
+ }
}
]
},
@@ -952,6 +1029,27 @@
"description" : "Path to the file, relative to the workdir. Used to display in the output."
}
}
+ },
+ "fileDataFullyRequired": {
+ "type": "object",
+ "additionalProperties" : false,
+ "required" : [
+ "path",
+ "content"
+ ],
+ "properties": {
+ "content": {
+ "type": [
+ "string",
+ "path"
+ ],
+ "description" : "Content of the file, which will be provided inline or written to disk in the workdir. If a !path, the file contents will be read from the provided path."
+ },
+ "path": {
+ "type": "string",
+ "description" : "Path to the file, relative to the workdir. Used to display in the output."
+ }
+ }
}
}
}
diff --git a/tested/dsl/schema.json b/tested/dsl/schema.json
index 7e7362db4..fca440a57 100644
--- a/tested/dsl/schema.json
+++ b/tested/dsl/schema.json
@@ -343,6 +343,7 @@
]
},
"files" : {
+ "description" : "Deprecated: use input_files instead.",
"type" : "array",
"items" : {
"$ref" : "#/definitions/file"
@@ -366,6 +367,14 @@
"description" : "Expected output at stdout",
"$ref" : "#/definitions/textOutputChannel"
},
+ "file": {
+ "description" : "Deprecated: use output_files instead.",
+ "$ref" : "#/definitions/fileOutputChannel"
+ },
+ "output_files": {
+ "description" : "Expected files generated by the submission.",
+ "$ref" : "#/definitions/fileOutputChannel"
+ },
"exit_code" : {
"type" : "integer",
"description" : "Expected exit code for the run"
@@ -611,6 +620,35 @@
}
}
},
+ {
+ "type" : "object",
+ "description" : "Built-in oracle for files.",
+ "required" : [
+ "data"
+ ],
+ "properties" : {
+ "data" : {
+ "type" : "array",
+ "description" : "Files to expect.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
+ "oracle" : {
+ "const" : "builtin"
+ },
+ "config" : {
+ "$ref" : "#/definitions/fileConfigurationOptions"
+ }
+ }
+ },
+ {
+ "type" : "array",
+ "description" : "Built-in oracle for files.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
{
"type" : "object",
"description" : "Custom oracle for file values.",
@@ -656,6 +694,49 @@
}
}
}
+ },
+ {
+ "type" : "object",
+ "description" : "Custom oracle for file values.",
+ "required" : [
+ "oracle",
+ "data"
+ ],
+ "properties" : {
+ "oracle" : {
+ "const" : "custom_check"
+ },
+ "data" : {
+ "type" : "array",
+ "description" : "Files to expect.",
+ "items" : {
+ "$ref" : "#/definitions/fileDataFullyRequired"
+ }
+ },
+ "file" : {
+ "type" : "string",
+ "description" : "The path to the file containing the custom check function."
+ },
+ "name" : {
+ "type" : "string",
+ "description" : "The name of the custom check function.",
+ "default" : "evaluate"
+ },
+ "arguments" : {
+ "type" : "array",
+ "description" : "List of YAML (or tagged expression) values to use as arguments to the function.",
+ "items" : {
+ "$ref" : "#/definitions/yamlValueOrPythonExpression"
+ }
+ },
+ "languages": {
+ "type" : "array",
+ "description" : "Which programming languages are supported by this oracle.",
+ "items" : {
+ "$ref" : "#/definitions/programmingLanguage"
+ }
+ }
+ }
}
]
},
@@ -940,6 +1021,24 @@
"description" : "Path to the file, relative to the workdir. Used to display in the output."
}
}
+ },
+ "fileDataFullyRequired": {
+ "type": "object",
+ "additionalProperties" : false,
+ "required" : [
+ "path",
+ "content"
+ ],
+ "properties": {
+ "content": {
+ "type": "string",
+ "description" : "Content of the file, which will be provided inline or written to disk in the workdir. If a !path, the file contents will be read from the provided path."
+ },
+ "path": {
+ "type": "string",
+ "description" : "Path to the file, relative to the workdir. Used to display in the output."
+ }
+ }
}
}
}
diff --git a/tested/dsl/translate_parser.py b/tested/dsl/translate_parser.py
index 9b23315a3..cee9112a0 100644
--- a/tested/dsl/translate_parser.py
+++ b/tested/dsl/translate_parser.py
@@ -305,10 +305,12 @@ def deepen_context(self, new_level: YamlDict | None) -> "DslContext":
return evolve(self, files=the_files, config=the_config)
def merge_inheritable_with_specific_config(
- self, level: YamlDict, config_name: str
+ self, level: YamlObject, config_name: str
) -> dict:
inherited_options = self.config.get(config_name, dict())
- specific_options = level.get("config", dict())
+ specific_options = (
+ level.get("config", dict()) if isinstance(level, dict) else dict()
+ )
assert isinstance(
specific_options, dict
), f"The config options for {config_name} must be a dictionary, not a {type(specific_options)}"
@@ -515,32 +517,50 @@ def _convert_text_output_channel(
def _convert_file_output_channel(
stream: YamlObject, context: DslContext, config_name: str
) -> FileOutputChannel:
- assert isinstance(stream, dict)
- expected = str(stream["content"])
- actual = str(stream["location"])
+ config = context.merge_inheritable_with_specific_config(stream, config_name)
+ if "mode" not in config:
+ config["mode"] = "full"
+ assert config["mode"] in (
+ "full",
+ "line",
+ ), f"The file oracle only supports modes full and line, not {config['mode']}"
+
+ if isinstance(stream, list):
+        # A plain list of files: this form has no equivalent in the legacy format, so no special handling is needed.
+ files = [_convert_text_data_required_path(f) for f in stream]
+ oracle = GenericTextOracle(name=TextBuiltin.FILE, options=config)
+ elif isinstance(stream, dict):
+ if "content" in stream:
+ # Handle legacy stuff.
+ expected = str(stream["content"])
+ actual = str(stream["location"])
+ files = [
+ TextData(
+ path=actual,
+ content=ContentPath(path=expected),
+ )
+ ]
+ else:
+ file_list = stream.get("data")
+ assert isinstance(
+ file_list, list
+ ), "The data key must be a list of expected files."
+ files = [_convert_text_data_required_path(f) for f in file_list]
- if "oracle" not in stream or stream["oracle"] == "builtin":
- config = context.merge_inheritable_with_specific_config(stream, config_name)
- if "mode" not in config:
- config["mode"] = "full"
-
- assert config["mode"] in (
- "full",
- "line",
- ), f"The file oracle only supports modes full and line, not {config['mode']}"
- return FileOutputChannel(
- expected_path=expected,
- actual_path=actual,
- oracle=GenericTextOracle(name=TextBuiltin.FILE, options=config),
- )
- elif stream["oracle"] == "custom_check":
- return FileOutputChannel(
- expected_path=expected,
- actual_path=actual,
- oracle=_convert_custom_check_oracle(stream),
+ if "oracle" not in stream or stream["oracle"] == "builtin":
+ oracle = GenericTextOracle(name=TextBuiltin.FILE, options=config)
+ elif stream["oracle"] == "custom_check":
+ oracle = _convert_custom_check_oracle(stream)
+ else:
+ raise TypeError(f"Unknown file oracle type: {stream['oracle']}")
+ else:
+ raise InvalidDslError(
+ f"Invalid file output channel: {stream}.\n\n"
+ "File output channels must be a list of files, or a dictionary with a 'data' key."
)
- raise TypeError(f"Unknown file oracle type: {stream['oracle']}")
+
+ return FileOutputChannel(files=files, oracle=oracle)
def _convert_yaml_value(stream: YamlObject) -> Value | None:
@@ -694,6 +714,8 @@ def _convert_testcase(testcase: YamlDict, context: DslContext) -> Testcase:
output.stdout = _convert_text_output_channel(stdout, context, "stdout")
if (file := testcase.get("file")) is not None:
output.file = _convert_file_output_channel(file, context, "file")
+ if (file := testcase.get("output_files")) is not None:
+ output.file = _convert_file_output_channel(file, context, "output_files")
if (stderr := testcase.get("stderr")) is not None:
output.stderr = _convert_text_output_channel(stderr, context, "stderr")
if (exception := testcase.get("exception")) is not None:
diff --git a/tested/judge/evaluation.py b/tested/judge/evaluation.py
index 7b186b9aa..13e202312 100644
--- a/tested/judge/evaluation.py
+++ b/tested/judge/evaluation.py
@@ -112,41 +112,56 @@ def _evaluate_channel(
bundle, context_directory, output, testcase, unexpected_status=unexpected_status
)
# Run the oracle.
- evaluation_result = evaluator(output, actual if actual else "")
- status = evaluation_result.result
+ # The file oracle produces multiple results if comparing multiple files.
+ evaluation_results = evaluator(output, actual if actual else "")
+ evaluation_results = (
+ [evaluation_results]
+ if not isinstance(evaluation_results, list)
+ else evaluation_results
+ )
+
+ for evaluation_result in evaluation_results:
+ status = evaluation_result.result
+
+ # Decide if we should show this channel or not.
+ is_correct = status.enum == Status.CORRECT
+ should_report_case = should_show(output, channel, evaluation_result)
- # Decide if we should show this channel or not.
- is_correct = status.enum == Status.CORRECT
- should_report_case = should_show(output, channel, evaluation_result)
+ if not should_report_case and is_correct:
+ continue
+
+ expected = evaluation_result.readable_expected
+ if evaluation_result.channel_override is not None:
+ display_channel = evaluation_result.channel_override
+ else:
+ display_channel = channel
- if not should_report_case and is_correct:
- # We do report that a test is correct, to set the status.
- return False
+ out.add(StartTest(expected=expected, channel=display_channel))
- expected = evaluation_result.readable_expected
- out.add(StartTest(expected=expected, channel=channel))
+ # Report any messages we received.
+ for message in evaluation_result.messages:
+ out.add(AppendMessage(message=message))
- # Report any messages we received.
- for message in evaluation_result.messages:
- out.add(AppendMessage(message=message))
+ missing = False
+ if actual is None:
+ out.add(AppendMessage(message=get_i18n_string("judge.evaluation.missing")))
+ missing = True
+ elif should_report_case and timeout and not is_correct:
+ status.human = get_i18n_string("judge.evaluation.time-limit")
+ status.enum = Status.TIME_LIMIT_EXCEEDED
+ out.add(AppendMessage(message=status.human))
+ elif should_report_case and memory and not is_correct:
+ status.human = get_i18n_string("judge.evaluation.memory-limit")
+            status.enum = Status.MEMORY_LIMIT_EXCEEDED
+ out.add(AppendMessage(message=status.human))
- missing = False
- if actual is None:
- out.add(AppendMessage(message=get_i18n_string("judge.evaluation.missing")))
- missing = True
- elif should_report_case and timeout and not is_correct:
- status.human = get_i18n_string("judge.evaluation.time-limit")
- status.enum = Status.TIME_LIMIT_EXCEEDED
- out.add(AppendMessage(message=status.human))
- elif should_report_case and memory and not is_correct:
- status.human = get_i18n_string("judge.evaluation.memory-limit")
- status.enum = Status.TIME_LIMIT_EXCEEDED
- out.add(AppendMessage(message=status.human))
+ # Close the test.
+ out.add(CloseTest(generated=evaluation_result.readable_actual, status=status))
- # Close the test.
- out.add(CloseTest(generated=evaluation_result.readable_actual, status=status))
+ if missing:
+ return True
- return missing
+ return False
def evaluate_context_results(
@@ -244,7 +259,7 @@ def evaluate_context_results(
)
# All files that will be used in this context.
- all_files = context.get_files()
+ all_files = context.get_input_files()
# Begin processing the normal testcases.
for i, testcase in enumerate(context.testcases):
@@ -355,7 +370,9 @@ def evaluate_context_results(
# Add file links
if all_files:
- collector.add(link_files_message(all_files))
+ link_files = link_files_message(all_files)
+ if link_files:
+ collector.add(link_files)
if exec_results.timeout:
return Status.TIME_LIMIT_EXCEEDED
@@ -366,7 +383,7 @@ def evaluate_context_results(
def link_files_message(
link_files: Collection[TextData],
-) -> AppendMessage:
+) -> AppendMessage | None:
link_list = []
for link_file in link_files:
# TODO: handle inline files somehow.
@@ -377,6 +394,9 @@ def link_files_message(
f'{html.escape(link_file.path)}'
)
+ if len(link_list) == 0:
+ return None # Do not append any message if there are no files.
+
file_list = ", ".join(link_list)
file_list_str = get_i18n_string(
"judge.evaluation.files", count=len(link_list), files=file_list
@@ -432,14 +452,17 @@ def should_show(
raise AssertionError(f"Unknown channel {channel}")
-def guess_expected_value(bundle: Bundle, test: OutputChannel) -> str:
+def guess_expected_value(
+ bundle: Bundle, test: OutputChannel, file_index: int | None = None
+) -> str:
"""
Try and get the expected value for an output channel. In some cases, such as
- a programmed or language specific oracle, there will be no expected value
+ a programmed or language-specific oracle, there will be no expected value
available in the test suite. In that case, we use an empty string.
:param bundle: Configuration bundle.
:param test: The output channel.
+ :param file_index: Index of the file in the output channel, if applicable.
:return: A best effort attempt of the expected value.
"""
@@ -448,7 +471,8 @@ def guess_expected_value(bundle: Bundle, test: OutputChannel) -> str:
elif isinstance(test, TextOutputChannel):
return test.get_data_as_string(bundle.config.resources)
elif isinstance(test, FileOutputChannel):
- return test.get_data_as_string(bundle.config.resources)
+        # Default to the first file when no index is provided.
+ return test.get_data_as_string(bundle.config.resources, file_index or 0)
elif isinstance(test, ExceptionOutputChannel):
return (
test.exception.readable(bundle.config.programming_language)
@@ -472,16 +496,38 @@ def _add_channel(
):
"""Add a channel to the output if it should be shown."""
if should_show(output, channel):
- updates.append(
- StartTest(expected=guess_expected_value(bundle, output), channel=channel)
- )
- updates.append(
- CloseTest(
- generated="",
- status=StatusMessage(enum=Status.NOT_EXECUTED),
- accepted=False,
+ if isinstance(output, FileOutputChannel):
+ for i, file in enumerate(output.files):
+ assert (
+ file.path is not None
+ ), "File path must be set when using output_files"
+
+ updates.append(
+ StartTest(
+ expected=guess_expected_value(bundle, output, i),
+ channel=file.path,
+ )
+ )
+ updates.append(
+ CloseTest(
+ generated="",
+ status=StatusMessage(enum=Status.NOT_EXECUTED),
+ accepted=False,
+ )
+ )
+ else:
+ updates.append(
+ StartTest(
+ expected=guess_expected_value(bundle, output), channel=channel
+ )
+ )
+ updates.append(
+ CloseTest(
+ generated="",
+ status=StatusMessage(enum=Status.NOT_EXECUTED),
+ accepted=False,
+ )
)
- )
def complete_evaluation(bundle: Bundle, collector: OutputManager):
@@ -506,7 +552,7 @@ def complete_evaluation(bundle: Bundle, collector: OutputManager):
if testcase_start == 0:
collector.add(StartContext(description=context.description))
# All files that will be used in this context.
- all_files = context.get_files()
+ all_files = context.get_input_files()
# Begin normal testcases.
for j, testcase in enumerate(
@@ -533,7 +579,9 @@ def complete_evaluation(bundle: Bundle, collector: OutputManager):
# Add links to files we haven't seen yet.
if all_files:
- updates.insert(0, link_files_message(all_files))
+ link_files = link_files_message(all_files)
+ if link_files:
+ updates.insert(0, link_files)
collector.add_all(updates)
collector.add(CloseContext(accepted=False))
diff --git a/tested/judge/planning.py b/tested/judge/planning.py
index 9b887d50f..fb4f21805 100644
--- a/tested/judge/planning.py
+++ b/tested/judge/planning.py
@@ -127,25 +127,67 @@ class PlanStrategy(Enum):
def _flattened_contexts_to_units(
flattened_contexts: list[PlannedContext],
) -> list[list[PlannedContext]]:
+ """
+ Transform a flat list of contexts into a list of execution units.
+
+ This function attempts to produce as few execution units as possible.
+ Contexts are split into a new execution unit whenever there is a "conflict".
+ """
contexts_per_unit = []
current_unit_contexts = []
+ # Track files for the current execution unit.
+ current_input_files: dict[str, ContentPath | str] = {}
+ current_output_files: set[str] = set() # These are paths
+
for planned in flattened_contexts:
- # If we get stdin, start a new execution unit.
- if (
+ planned_input_files = {
+ planned_input_file.path: planned_input_file.content
+ for planned_input_file in planned.context.get_input_files()
+ if planned_input_file.path
+ }
+
+ planned_output_files = [
+ planned_output_file.path
+ for planned_output_file in planned.context.get_output_files()
+ if planned_output_file.path
+ ]
+
+ # If any context wants the same input file with different content, we have a conflict.
+ has_input_file_conflict = any(
+ path in current_input_files and current_input_files[path] != content
+ for path, content in planned_input_files.items()
+ )
+
+ # If any context wants an output file in the same location, we have a conflict.
+ has_output_conflict = any(
+ path in current_output_files for path in planned_output_files
+ )
+
+ # If any context has stdin, we have a conflict.
+ has_stdin_conflict = (
planned.context.has_main_testcase()
and cast(MainInput, planned.context.testcases[0].input).stdin
!= EmptyChannel.NONE
- ):
+ )
+
+ if has_input_file_conflict or has_stdin_conflict or has_output_conflict:
if current_unit_contexts:
contexts_per_unit.append(current_unit_contexts)
current_unit_contexts = []
+ current_input_files.clear()
+ current_output_files.clear()
current_unit_contexts.append(planned)
+ current_input_files.update(planned_input_files)
+ current_output_files.update(planned_output_files)
+        # A context that checks the exit code must close its execution unit, so the next context starts a new one.
if planned.context.has_exit_testcase():
contexts_per_unit.append(current_unit_contexts)
current_unit_contexts = []
+ current_input_files.clear()
+ current_output_files.clear()
if current_unit_contexts:
contexts_per_unit.append(current_unit_contexts)
diff --git a/tested/oracles/__init__.py b/tested/oracles/__init__.py
index 53d7454f0..6ef060283 100644
--- a/tested/oracles/__init__.py
+++ b/tested/oracles/__init__.py
@@ -4,6 +4,7 @@
from tested.configs import Bundle
from tested.dodona import Status
+from tested.oracles import file
from tested.oracles.common import Oracle, RawOracle, _curry_oracle
from tested.testsuite import (
CustomCheckOracle,
@@ -69,7 +70,7 @@ def get_oracle(
if oracle.name == TextBuiltin.TEXT:
return currier(text.evaluate_text, oracle.options)
elif oracle.name == TextBuiltin.FILE:
- return currier(text.evaluate_file, oracle.options)
+ return currier(file.evaluate_file, oracle.options)
raise AssertionError("Unknown built-in text oracle")
# Handle built-in value functions
elif isinstance(oracle, GenericValueOracle):
diff --git a/tested/oracles/common.py b/tested/oracles/common.py
index 3bc2dfa44..cca443591 100644
--- a/tested/oracles/common.py
+++ b/tested/oracles/common.py
@@ -62,15 +62,27 @@ class OracleResult:
Represents the result of applying an oracle to evaluate some result.
"""
- result: StatusMessage # The result of the evaluation.
- readable_expected: (
- str # A human-friendly version of what the channel should have been.
- )
- readable_actual: str # A human-friendly version (on a best-efforts basis) of what the channel is.
+ result: StatusMessage
+ """
+ The result of the evaluation.
+ """
+ readable_expected: str
+ """
+ A human-friendly version of what the channel should have been.
+ """
+ readable_actual: str
+ """
+ A human-friendly version (on a best-efforts basis) of what the channel is.
+ """
messages: list[Message] = field(factory=list)
- is_multiline_string: bool = (
- False # Indicates if the evaluation result is a multiline string.
- )
+ is_multiline_string: bool = False
+ """
+ Indicates if the evaluation result is a multiline string.
+ """
+ channel_override: str | None = None
+ """
+ Allows overriding as which channel this will be reported.
+ """
@fallback_field(
@@ -151,9 +163,11 @@ class OracleConfig:
context_dir: Path
-RawOracle = Callable[[OracleConfig, OutputChannel, str], OracleResult]
+RawOracle = Callable[
+ [OracleConfig, OutputChannel, str], OracleResult | list[OracleResult]
+]
-Oracle = Callable[[OutputChannel, str], OracleResult]
+Oracle = Callable[[OutputChannel, str], OracleResult | list[OracleResult]]
def _curry_oracle(
diff --git a/tested/oracles/file.py b/tested/oracles/file.py
new file mode 100644
index 000000000..90dd84de2
--- /dev/null
+++ b/tested/oracles/file.py
@@ -0,0 +1,101 @@
+from attr import evolve
+
+from tested.dodona import Status, StatusMessage
+from tested.internationalization import get_i18n_string
+from tested.oracles.common import OracleConfig, OracleResult
+from tested.oracles.text import compare_text, text_options
+from tested.testsuite import ContentPath, FileOutputChannel, OutputChannel, TextData
+
+
+def evaluate_file(
+ config: OracleConfig, channel: OutputChannel, actual: str
+) -> list[OracleResult] | OracleResult:
+ """
+ Evaluate the contents of two files. The file oracle supports one option,
+ ``mode``, used to define in which mode the oracle should operate:
+
+ 1. ``full``: The complete contents are passed to the :class:`TextEvaluator`.
+ 2. ``line``: The file is split by lines and each line is compared to the
+ corresponding line with the :class:`TextEvaluator`. The lines are compared
+ without newlines.
+
+ Since the text oracle is used behind the scenes, this oracle also supports
+ all parameters of that oracle.
+
+ When no mode is passed, the oracle will default to ``full``.
+ """
+ assert isinstance(channel, FileOutputChannel)
+ options = text_options(config)
+
+ # There must be nothing as output.
+ if actual:
+ message = get_i18n_string("oracles.text.file.unexpected.message", actual=actual)
+ return OracleResult(
+ result=StatusMessage(
+ enum=Status.WRONG,
+ human=get_i18n_string("oracles.text.file.unexpected.status"),
+ ),
+ readable_expected="",
+ readable_actual=actual,
+ messages=[message],
+ )
+
+ results = []
+
+ for file in channel.files:
+ results.append(compare_file(config, file, options))
+
+ return results
+
+
+def compare_file(
+ config: OracleConfig,
+ file: TextData,
+ options: dict,
+) -> OracleResult:
+ assert isinstance(
+ file.path, str
+ ), "File path must be a string when using file evaluator"
+
+ try:
+ expected_content = file.get_data_as_string(config.bundle.config.resources)
+ except FileNotFoundError:
+ # We know content is ContentPath if we get a file not found error.
+ assert isinstance(file.content, ContentPath)
+ raise ValueError(f"File {file.content.path} not found in resources.")
+
+ actual_path = config.context_dir / file.path
+
+ try:
+ with open(str(actual_path), "r") as f:
+ actual = f.read()
+ except FileNotFoundError:
+ return OracleResult(
+ result=StatusMessage(
+ enum=Status.RUNTIME_ERROR,
+ human=get_i18n_string("oracles.text.file.not-found"),
+ ),
+ readable_expected=expected_content,
+ readable_actual="",
+ channel_override=file.path,
+ )
+
+ if options["mode"] == "full":
+ text_result = compare_text(options, expected_content, actual)
+ return evolve(text_result, channel_override=file.path)
+ else:
+ assert options["mode"] == "line"
+ strip_newlines = options.get("stripNewlines", False)
+ expected_lines = expected_content.splitlines(keepends=not strip_newlines)
+ actual_lines = actual.splitlines(keepends=not strip_newlines)
+ correct = len(actual_lines) == len(expected_lines)
+ for expected_line, actual_line in zip(expected_lines, actual_lines):
+ r = compare_text(options, expected_line, actual_line)
+ correct = correct and r.result.enum == Status.CORRECT
+
+ return OracleResult(
+ result=StatusMessage(enum=Status.CORRECT if correct else Status.WRONG),
+ readable_expected=expected_content,
+ readable_actual=actual,
+ channel_override=file.path,
+ )
diff --git a/tested/oracles/text.py b/tested/oracles/text.py
index 6724e18b9..6ed1387ef 100644
--- a/tested/oracles/text.py
+++ b/tested/oracles/text.py
@@ -6,9 +6,8 @@
from typing import Any
from tested.dodona import Status, StatusMessage
-from tested.internationalization import get_i18n_string
from tested.oracles.common import OracleConfig, OracleResult
-from tested.testsuite import FileOutputChannel, OutputChannel, TextOutputChannel
+from tested.testsuite import OutputChannel, TextOutputChannel
def _is_number(string: str) -> float | None:
@@ -18,7 +17,7 @@ def _is_number(string: str) -> float | None:
return None
-def _text_options(config: OracleConfig) -> dict:
+def text_options(config: OracleConfig) -> dict:
defaults = {
# Options for textual comparison
"ignoreWhitespace": False,
@@ -94,82 +93,8 @@ def evaluate_text(
Note: floating points inside other texts are currently not supported.
"""
assert isinstance(channel, TextOutputChannel)
- options = _text_options(config)
+ options = text_options(config)
expected = channel.get_data_as_string(config.bundle.config.resources)
result = compare_text(options, expected, actual)
return result
-
-
-def evaluate_file(
- config: OracleConfig, channel: OutputChannel, actual: str
-) -> OracleResult:
- """
- Evaluate the contents of two files. The file oracle supports one option,
- ``mode``, used to define in which mode the oracle should operate:
-
- 1. ``full``: The complete contents are passed to the :class:`TextEvaluator`.
- 2. ``line``: The file is split by lines and each line is compared to the
- corresponding line with the :class:`TextEvaluator`. The lines are compared
- without newlines.
-
- Since the text oracle is used behind the scenes, this oracle also supports
- all parameters of that oracle.
-
- When no mode is passed, the oracle will default to ``full``.
- """
- assert isinstance(channel, FileOutputChannel)
- options = _text_options(config)
-
- # There must be nothing as output.
- if actual:
- message = get_i18n_string("oracles.text.file.unexpected.message", actual=actual)
- return OracleResult(
- result=StatusMessage(
- enum=Status.WRONG,
- human=get_i18n_string("oracles.text.file.unexpected.status"),
- ),
- readable_expected="",
- readable_actual=actual,
- messages=[message],
- )
-
- expected_path = f"{config.bundle.config.resources}/{channel.expected_path}"
-
- try:
- with open(expected_path, "r") as file:
- expected = file.read()
- except FileNotFoundError:
- raise ValueError(f"File {expected_path} not found in resources.")
-
- actual_path = config.context_dir / channel.actual_path
-
- try:
- with open(str(actual_path), "r") as file:
- actual = file.read()
- except FileNotFoundError:
- return OracleResult(
- result=StatusMessage(
- enum=Status.RUNTIME_ERROR,
- human=get_i18n_string("oracles.text.file.not-found"),
- ),
- readable_expected=expected,
- readable_actual="",
- )
-
- if options["mode"] == "full":
- return compare_text(options, expected, actual)
- else:
- assert options["mode"] == "line"
- strip_newlines = options.get("stripNewlines", False)
- expected_lines = expected.splitlines(keepends=not strip_newlines)
- actual_lines = actual.splitlines(keepends=not strip_newlines)
- correct = len(actual_lines) == len(expected_lines)
- for expected_line, actual_line in zip(expected_lines, actual_lines):
- r = compare_text(options, expected_line, actual_line)
- correct = correct and r.result.enum == Status.CORRECT
- return OracleResult(
- result=StatusMessage(enum=Status.CORRECT if correct else Status.WRONG),
- readable_expected=expected,
- readable_actual=actual,
- )
diff --git a/tested/oracles/value.py b/tested/oracles/value.py
index e86d0c3a5..f6716b38c 100644
--- a/tested/oracles/value.py
+++ b/tested/oracles/value.py
@@ -20,6 +20,7 @@
from tested.oracles.common import OracleConfig, OracleResult
from tested.parsing import get_converter
from tested.serialisation import (
+ Expression,
ObjectKeyValuePair,
ObjectType,
SequenceType,
@@ -30,6 +31,8 @@
to_python_comparable,
)
from tested.testsuite import (
+ ExceptionOutputChannel,
+ FileOutputChannel,
OracleOutputChannel,
OutputChannel,
TextOutputChannel,
@@ -56,25 +59,69 @@ def get_values(
bundle: Bundle, output_channel: OracleOutputChannel, actual_str: str
) -> OracleResult | tuple[Value, str, Value | None, str]:
if isinstance(output_channel, TextOutputChannel):
- expected = output_channel.get_data_as_string(bundle.config.resources)
- expected_value = StringType(type=BasicStringTypes.TEXT, data=expected)
- actual_value = StringType(type=BasicStringTypes.TEXT, data=actual_str)
- return expected_value, expected, actual_value, actual_str
+ return _get_text_value(bundle, output_channel, actual_str)
+ elif isinstance(output_channel, FileOutputChannel):
+ return _get_file_values(bundle, output_channel)
+ elif isinstance(output_channel, ValueOutputChannel):
+ return _get_value_value(bundle, output_channel, actual_str)
+ else:
+ raise ValueError(f"Unsupported output channel: {output_channel}")
+
+
+def _get_file_values(
+ bundle: Bundle, output_channel: FileOutputChannel
+) -> tuple[Value, str, Value, str]:
+ expected = [
+ StringType(
+ type=BasicStringTypes.TEXT,
+ data=output_channel.get_data_as_string(bundle.config.resources, i),
+ )
+ for i, _ in enumerate(output_channel.files)
+ ]
+ actual = [
+ StringType(type=BasicStringTypes.TEXT, data=f.path)
+ for f in output_channel.files
+ if f.path
+ ]
+ # noinspection PyUnnecessaryCast
+ expected_value = SequenceType(
+ type=BasicSequenceTypes.SEQUENCE, data=cast(list[Expression], expected)
+ )
+ # noinspection PyUnnecessaryCast
+ actual_value = SequenceType(
+ type=BasicSequenceTypes.SEQUENCE, data=cast(list[Expression], actual)
+ )
- assert isinstance(output_channel, ValueOutputChannel)
+ expected_str = ", ".join(f.data for f in expected)
+ actual_str = ", ".join(f.data for f in actual)
- expected = output_channel.value
- assert isinstance(expected, Value)
- readable_expected = generate_statement(bundle, expected)
+ return expected_value, expected_str, actual_value, actual_str
+
+
+def _get_text_value(
+ bundle: Bundle, output_channel: TextOutputChannel, actual_str: str
+) -> tuple[Value, str, Value, str]:
+ expected = output_channel.get_data_as_string(bundle.config.resources)
+ expected_value = StringType(type=BasicStringTypes.TEXT, data=expected)
+ actual_value = StringType(type=BasicStringTypes.TEXT, data=actual_str)
+ return expected_value, expected, actual_value, actual_str
+
+
+def _get_value_value(
+ bundle: Bundle, output_channel: ValueOutputChannel, actual_str: str
+) -> OracleResult | tuple[Value, str, Value | None, str]:
+ expected_value = output_channel.value
+ assert isinstance(expected_value, Value)
+ readable_expected = generate_statement(bundle, expected_value)
# Special support for empty strings.
if not actual_str.strip():
- return expected, readable_expected, None, ""
+ return expected_value, readable_expected, None, ""
# A crash here indicates a problem with one of the language implementations,
# or a student is trying to cheat.
try:
- actual = parse_value(actual_str)
+ actual_value = parse_value(actual_str)
except Exception as e:
raw_message = f"Received {actual_str}, which caused {e} for get_values."
message = ExtendedMessage(
@@ -88,8 +135,8 @@ def get_values(
messages=[message],
)
- readable_actual = generate_statement(bundle, actual)
- return expected, readable_expected, actual, readable_actual
+ readable_actual = generate_statement(bundle, actual_value)
+ return expected_value, readable_expected, actual_value, readable_actual
def _prepare_value_for_type_check(value: Value) -> Value:
diff --git a/tested/testsuite.py b/tested/testsuite.py
index 05f0396e9..3e3aa499e 100644
--- a/tested/testsuite.py
+++ b/tested/testsuite.py
@@ -293,25 +293,39 @@ class TextOutputChannel(TextData):
oracle: GenericTextOracle | CustomCheckOracle = field(factory=GenericTextOracle)
-@fallback_field({"evaluator": "oracle"})
-@ignore_field("show_expected")
+def _file_to_files_converter(value: Any, full: Any) -> Any:
+ if not isinstance(value, str):
+ return value
+
+ if "actual_path" not in full or not isinstance(full["actual_path"], str):
+ return value
+
+ return [{"path": full["actual_path"], "content": {"path": value}}]
+
+
+@fallback_field(
+ {"evaluator": "oracle", "expected_path": ("files", _file_to_files_converter)}
+)
+@ignore_field("show_expected", "actual_path")
@define
class FileOutputChannel(WithFeatures):
"""Describes the output for files."""
- expected_path: str # Path to the file to compare to.
- actual_path: str # Path to the generated file (by the user code)
+ files: list[TextData]
oracle: GenericTextOracle | CustomCheckOracle = field(
factory=lambda: GenericTextOracle(name=TextBuiltin.FILE)
)
+ def __attrs_post_init__(self):
+ for f in self.files:
+ if f.path is None:
+ raise ValueError("File path must be set when using output_files.")
+
def get_used_features(self) -> FeatureSet:
return NOTHING
- def get_data_as_string(self, resources: Path) -> str:
- file_path = _resolve_path(resources, self.expected_path)
- with open(file_path, "r") as file:
- return file.read()
+ def get_data_as_string(self, resources: Path, file_index: int) -> str:
+ return self.files[file_index].get_data_as_string(resources)
@fallback_field({"evaluator": "oracle"})
@@ -703,12 +717,19 @@ def has_main_testcase(self):
def has_exit_testcase(self):
return not self.testcases[-1].output.exit_code == IgnoredChannel.IGNORED
- def get_files(self) -> set[TextData]:
+ def get_input_files(self) -> set[TextData]:
all_files = set()
for t in self.testcases:
all_files = all_files.union(t.input_files)
return all_files
+ def get_output_files(self) -> set[TextData]:
+ all_files = set()
+ for t in self.testcases:
+ if isinstance(t.output.file, FileOutputChannel):
+ all_files.update(t.output.file.files)
+ return all_files
+
def _runs_to_tab_converter(runs: list | None, _):
assert isinstance(runs, list), "The field 'runs' must be a list."
diff --git a/tests/exercises/output-files-custom-oracle/evaluation/evaluator.py b/tests/exercises/output-files-custom-oracle/evaluation/evaluator.py
new file mode 100644
index 000000000..ddc87510a
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/evaluation/evaluator.py
@@ -0,0 +1,25 @@
+import os
+from evaluation_utils import EvaluationResult
+
+
+def evaluate(context):
+ file_path = os.path.join(context.execution_directory, "even.txt")
+ if not os.path.isfile(file_path):
+ return EvaluationResult(False, "The file even.txt should exist", f"The file even.txt was not found in {context.execution_directory}")
+
+ with open(file_path, "r") as f:
+ content = f.read().splitlines()
+
+ # Check if all numbers from 1 to 10 are even
+ try:
+ numbers = [int(line.strip()) for line in content if line.strip()]
+ except ValueError:
+ return EvaluationResult(False, "The file even.txt should contain even numbers", f"The file even.txt contains non-numeric data: {content}")
+
+ expected_numbers = {2, 4, 6, 8, 10}
+ actual_numbers = set(numbers)
+
+ if expected_numbers == actual_numbers:
+ return EvaluationResult(True, str(expected_numbers), str(actual_numbers))
+ else:
+ return EvaluationResult(False, f"Expected {expected_numbers}, but got {actual_numbers}", str(actual_numbers))
diff --git a/tests/exercises/output-files-custom-oracle/evaluation/suite.yaml b/tests/exercises/output-files-custom-oracle/evaluation/suite.yaml
new file mode 100644
index 000000000..d6b85910e
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/evaluation/suite.yaml
@@ -0,0 +1,9 @@
+- tab: "Custom File Check"
+ testcases:
+ - expression: "generate_even()"
+ output_files:
+ oracle: "custom_check"
+ file: "evaluator.py"
+ data:
+ - path: "even.txt"
+ content: "2\n4\n6\n8\n10\n"
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.cpp b/tests/exercises/output-files-custom-oracle/solution/correct.cpp
new file mode 100644
index 000000000..ac2a9127a
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.cpp
@@ -0,0 +1,11 @@
+#include <fstream>
+#include <vector>
+
+void generate_even() {
+ std::ofstream outfile("even.txt");
+ std::vector evens = {10, 8, 6, 4, 2};
+ for (int n : evens) {
+ outfile << n << "\n";
+ }
+ outfile.close();
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.cs b/tests/exercises/output-files-custom-oracle/solution/correct.cs
new file mode 100644
index 000000000..7eca4e800
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.cs
@@ -0,0 +1,8 @@
+using System;
+using System.IO;
+
+public class Submission {
+ public static void GenerateEven() {
+ File.WriteAllText("even.txt", "10\n8\n6\n4\n2\n");
+ }
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.java b/tests/exercises/output-files-custom-oracle/solution/correct.java
new file mode 100644
index 000000000..c357dad19
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.java
@@ -0,0 +1,14 @@
+import java.io.FileWriter;
+import java.io.IOException;
+
+public class Submission {
+ public static void generateEven() {
+ try (FileWriter writer = new FileWriter("even.txt")) {
+ for (int i : new int[]{10, 8, 6, 4, 2}) {
+ writer.write(i + "\n");
+ }
+ } catch (IOException e) {
+ System.err.println("Error writing to file: " + e.getMessage());
+ }
+ }
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.js b/tests/exercises/output-files-custom-oracle/solution/correct.js
new file mode 100644
index 000000000..6a6e79c67
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.js
@@ -0,0 +1,7 @@
+const fs = require('fs');
+
+function generateEven() {
+ fs.writeFileSync('even.txt', '10\n8\n6\n4\n2\n');
+}
+
+module.exports = {generateEven};
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.kt b/tests/exercises/output-files-custom-oracle/solution/correct.kt
new file mode 100644
index 000000000..57f8bfc86
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.kt
@@ -0,0 +1,5 @@
+import java.io.File
+
+fun generateEven() {
+ File("even.txt").writeText("10\n8\n6\n4\n2\n")
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.py b/tests/exercises/output-files-custom-oracle/solution/correct.py
new file mode 100644
index 000000000..a5474d6c8
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.py
@@ -0,0 +1,5 @@
+def generate_even():
+ with open("even.txt", "w") as f:
+ # Generate them in non-sorted order to show the custom oracle is needed
+ for i in [10, 8, 6, 4, 2]:
+ f.write(f"{i}\n")
diff --git a/tests/exercises/output-files-custom-oracle/solution/correct.ts b/tests/exercises/output-files-custom-oracle/solution/correct.ts
new file mode 100644
index 000000000..08ce34556
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/correct.ts
@@ -0,0 +1,5 @@
+import * as fs from 'fs';
+
+function generateEven(): void {
+ fs.writeFileSync('even.txt', '10\n8\n6\n4\n2\n');
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.cpp b/tests/exercises/output-files-custom-oracle/solution/wrong.cpp
new file mode 100644
index 000000000..e80fe1e81
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.cpp
@@ -0,0 +1,11 @@
+#include <fstream>
+#include <vector>
+
+void generate_even() {
+ std::ofstream outfile("even.txt");
+ std::vector evens = {9, 8, 6, 4, 2};
+ for (int n : evens) {
+ outfile << n << "\n";
+ }
+ outfile.close();
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.cs b/tests/exercises/output-files-custom-oracle/solution/wrong.cs
new file mode 100644
index 000000000..9fc6d02fd
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.cs
@@ -0,0 +1,8 @@
+using System;
+using System.IO;
+
+public class Submission {
+ public static void GenerateEven() {
+ File.WriteAllText("even.txt", "9\n8\n6\n4\n2\n");
+ }
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.java b/tests/exercises/output-files-custom-oracle/solution/wrong.java
new file mode 100644
index 000000000..47e51fd99
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.java
@@ -0,0 +1,14 @@
+import java.io.FileWriter;
+import java.io.IOException;
+
+public class Submission {
+ public static void generateEven() {
+ try (FileWriter writer = new FileWriter("even.txt")) {
+ for (int i : new int[]{9, 8, 6, 4, 2}) {
+ writer.write(i + "\n");
+ }
+ } catch (IOException e) {
+ System.err.println("Error writing to file: " + e.getMessage());
+ }
+ }
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.js b/tests/exercises/output-files-custom-oracle/solution/wrong.js
new file mode 100644
index 000000000..5a46328df
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.js
@@ -0,0 +1,7 @@
+const fs = require('fs');
+
+function generateEven() {
+ fs.writeFileSync('even.txt', '9\n8\n6\n4\n2\n');
+}
+
+module.exports = {generateEven};
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.kt b/tests/exercises/output-files-custom-oracle/solution/wrong.kt
new file mode 100644
index 000000000..72c80a7ee
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.kt
@@ -0,0 +1,5 @@
+import java.io.File
+
+fun generateEven() {
+ File("even.txt").writeText("9\n8\n6\n4\n2\n")
+}
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.py b/tests/exercises/output-files-custom-oracle/solution/wrong.py
new file mode 100644
index 000000000..120c44442
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.py
@@ -0,0 +1,5 @@
+def generate_even():
+ with open("even.txt", "w") as f:
+ # Generate them in non-sorted order, but with one wrong number (9 instead of 10)
+ for i in [9, 8, 6, 4, 2]:
+ f.write(f"{i}\n")
diff --git a/tests/exercises/output-files-custom-oracle/solution/wrong.ts b/tests/exercises/output-files-custom-oracle/solution/wrong.ts
new file mode 100644
index 000000000..dbd1432c3
--- /dev/null
+++ b/tests/exercises/output-files-custom-oracle/solution/wrong.ts
@@ -0,0 +1,5 @@
+import * as fs from 'fs';
+
+function generateEven(): void {
+ fs.writeFileSync('even.txt', '9\n8\n6\n4\n2\n');
+}
diff --git a/tests/exercises/time-2-code/evaluation/plan.yml b/tests/exercises/time-2-code/evaluation/plan.yml
new file mode 100644
index 000000000..96936a6c2
--- /dev/null
+++ b/tests/exercises/time-2-code/evaluation/plan.yml
@@ -0,0 +1,20 @@
+tabs:
+ - tab: "Main"
+ testcases:
+ - stdin: "Dave"
+ stdout: |-
+ Hello, I don't believe we have met.
+ Nice to meet you Dave.
+ input_files:
+ - path: "datafile.txt"
+ content: ""
+ output_files:
+ - path: "datafile.txt"
+ content: "Dave"
+ - stdout: "It's good to see you again, Craig."
+ input_files:
+ - path: "datafile.txt"
+ content: "Craig"
+ output_files:
+ - path: "datafile.txt"
+ content: "Craig"
diff --git a/tests/exercises/time-2-code/solution/solution.c b/tests/exercises/time-2-code/solution/solution.c
new file mode 100644
index 000000000..5b88cb5d9
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.c
@@ -0,0 +1,50 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+// Helper to strip whitespace/newlines
+void trim(char *str) {
+ int len = strlen(str);
+ while (len > 0 && isspace(str[len - 1])) {
+ str[--len] = '\0';
+ }
+}
+
+void load(const char *filename, char *user, int max_len) {
+ FILE *file = fopen(filename, "r");
+ if (file) {
+ if (fgets(user, max_len, file) == NULL) {
+ user[0] = '\0';
+ }
+ fclose(file);
+ trim(user);
+ } else {
+ user[0] = '\0';
+ }
+}
+
+void save(const char *user, const char *filename) {
+ FILE *file = fopen(filename, "w");
+ if (file) {
+ fprintf(file, "%s\n", user);
+ fclose(file);
+ }
+}
+
+int main() {
+ char user[256] = {0};
+ load("datafile.txt", user, sizeof(user));
+
+ if (strlen(user) == 0) {
+ printf("Hello, I don't believe we have met.\n");
+ fgets(user, sizeof(user), stdin);
+ trim(user);
+ save(user, "datafile.txt");
+ printf("Nice to meet you %s.\n", user);
+ } else {
+ printf("It's good to see you again, %s.\n", user);
+ }
+
+ return 0;
+}
diff --git a/tests/exercises/time-2-code/solution/solution.cpp b/tests/exercises/time-2-code/solution/solution.cpp
new file mode 100644
index 000000000..625ae5377
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.cpp
@@ -0,0 +1,36 @@
+#include <iostream>
+#include <fstream>
+#include <string>
+
+std::string load(const std::string& filename) {
+ std::ifstream file(filename);
+ std::string user = "";
+ if (file.is_open()) {
+ std::getline(file, user);
+ file.close();
+ }
+ return user;
+}
+
+void save(const std::string& user, const std::string& filename) {
+ std::ofstream file(filename);
+ if (file.is_open()) {
+ file << user << "\n";
+ file.close();
+ }
+}
+
+int main() {
+ std::string user = load("datafile.txt");
+
+ if (user == "") {
+ std::cout << "Hello, I don't believe we have met.\n";
+ std::getline(std::cin, user);
+ save(user, "datafile.txt");
+ std::cout << "Nice to meet you " << user << ".\n";
+ } else {
+ std::cout << "It's good to see you again, " << user << ".\n";
+ }
+
+ return 0;
+}
diff --git a/tests/exercises/time-2-code/solution/solution.cs b/tests/exercises/time-2-code/solution/solution.cs
new file mode 100644
index 000000000..697147d5d
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.cs
@@ -0,0 +1,35 @@
+string user = Load("datafile.txt");
+
+if (string.IsNullOrEmpty(user))
+{
+ Console.WriteLine("Hello, I don't believe we have met.");
+ user = Console.ReadLine() ?? "";
+ Save(user, "datafile.txt");
+ Console.WriteLine($"Nice to meet you {user}.");
+}
+else
+{
+ Console.WriteLine($"It's good to see you again, {user}.");
+}
+
+// -------------------------
+// Subprograms (Local Functions)
+// -------------------------
+
+string Load(string filename)
+{
+ try
+ {
+ return File.ReadAllText(filename).Trim();
+ }
+ catch (FileNotFoundException)
+ {
+ return "";
+ }
+}
+
+void Save(string nameToSave, string filename)
+{
+ // Using Environment.NewLine is the modern C# equivalent to "\n"
+ File.WriteAllText(filename, nameToSave + Environment.NewLine);
+}
diff --git a/tests/exercises/time-2-code/solution/solution.hs b/tests/exercises/time-2-code/solution/solution.hs
new file mode 100644
index 000000000..667b1c54d
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.hs
@@ -0,0 +1,28 @@
+import System.IO
+import System.Directory (doesFileExist)
+
+load :: FilePath -> IO String
+load filename = do
+ exists <- doesFileExist filename
+ if exists
+ then do
+ contents <- readFile filename
+ -- Grab the first line, defaulting to empty string if file is empty
+ return $ if null (lines contents) then "" else head (lines contents)
+ else return ""
+
+save :: String -> FilePath -> IO ()
+save user filename = writeFile filename (user ++ "\n")
+
+main :: IO ()
+main = do
+ user <- load "datafile.txt"
+ if null user
+ then do
+ putStrLn "Hello, I don't believe we have met."
+ hFlush stdout -- Ensure the prompt prints before asking for input
+ newName <- getLine
+ save newName "datafile.txt"
+ putStrLn $ "Nice to meet you " ++ newName ++ "."
+ else
+ putStrLn $ "It's good to see you again, " ++ user ++ "."
diff --git a/tests/exercises/time-2-code/solution/solution.java b/tests/exercises/time-2-code/solution/solution.java
new file mode 100644
index 000000000..deaf8a205
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.java
@@ -0,0 +1,42 @@
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Scanner;
+
+public class Submission {
+ static String load(String filename) {
+ var path = Path.of(filename);
+ try {
+ return Files.readString(path).trim();
+ } catch (IOException e) {
+ return "";
+ }
+ }
+
+ static void save(String user, String filename) {
+ var path = Path.of(filename);
+ try {
+ Files.writeString(path, user + System.lineSeparator(),
+ StandardOpenOption.CREATE,
+ StandardOpenOption.TRUNCATE_EXISTING);
+ } catch (IOException e) {
+ System.err.println("Error saving file: " + e.getMessage());
+ }
+ }
+
+ public static void main(String[] args) {
+ var user = load("datafile.txt");
+
+ if (user.isEmpty()) {
+ System.out.println("Hello, I don't believe we have met.");
+ try (var scanner = new Scanner(System.in)) {
+ user = scanner.nextLine();
+ save(user, "datafile.txt");
+ System.out.println("Nice to meet you " + user + ".");
+ }
+ } else {
+ System.out.println("It's good to see you again, " + user + ".");
+ }
+ }
+}
diff --git a/tests/exercises/time-2-code/solution/solution.js b/tests/exercises/time-2-code/solution/solution.js
new file mode 100644
index 000000000..2ab5040be
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.js
@@ -0,0 +1,33 @@
+const fs = require('fs');
+const readline = require('readline');
+
+function load(filename) {
+ try {
+ return fs.readFileSync(filename, 'utf8').trim();
+ } catch (err) {
+ return "";
+ }
+}
+
+function save(user, filename) {
+ fs.writeFileSync(filename, `${user}\n`, 'utf8');
+}
+
+const user = load("datafile.txt");
+
+if (user === "") {
+ console.log("Hello, I don't believe we have met.");
+ const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout
+ });
+
+ // Empty string to wait silently for input
+ rl.question("", (name) => {
+ save(name, "datafile.txt");
+ console.log(`Nice to meet you ${name}.`);
+ rl.close();
+ });
+} else {
+ console.log(`It's good to see you again, ${user}.`);
+}
diff --git a/tests/exercises/time-2-code/solution/solution.kt b/tests/exercises/time-2-code/solution/solution.kt
new file mode 100644
index 000000000..1aa9fb5cd
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.kt
@@ -0,0 +1,23 @@
+import java.io.File
+
+fun load(filename: String): String {
+ val file = File(filename)
+ return if (file.exists()) file.readText().trim() else ""
+}
+
+fun save(user: String, filename: String) {
+ File(filename).writeText("$user\n")
+}
+
+fun main() {
+ var user = load("datafile.txt")
+
+ if (user.isEmpty()) {
+ println("Hello, I don't believe we have met.")
+ user = readln()
+ save(user, "datafile.txt")
+ println("Nice to meet you $user.")
+ } else {
+ println("It's good to see you again, $user.")
+ }
+}
diff --git a/tests/exercises/time-2-code/solution/solution.py b/tests/exercises/time-2-code/solution/solution.py
new file mode 100644
index 000000000..1140e6636
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.py
@@ -0,0 +1,33 @@
+# -------------------------
+# Subprograms
+# -------------------------
+# Load a single line of data
+def load(filename):
+ file = open(filename, "r")
+ user = file.read()
+ file.close()
+ user = user.strip()
+ return user
+
+
+# Save a single line of data
+def save(user, filename):
+ file = open(filename, "w")
+ user = user + "\n"
+ file.write(user)
+ file.close()
+
+
+# -------------------------
+# Main program
+# -------------------------
+user = load("datafile.txt")
+# If the file is empty, ask for the data to save...
+if user == "":
+ print("Hello, I don't believe we have met.")
+ user = input("What is your name? ")
+ save(user, "datafile.txt")
+ print("Nice to meet you", user + ".")
+# ...otherwise display the data in the file
+else:
+ print("It's good to see you again,", user + ".")
diff --git a/tests/exercises/time-2-code/solution/solution.sh b/tests/exercises/time-2-code/solution/solution.sh
new file mode 100644
index 000000000..583be9150
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+load() {
+ local filename="$1"
+ if [[ -f "$filename" ]]; then
+ cat "$filename" | xargs # xargs acts as a simple string trimmer
+ fi
+}
+
+save() {
+ local user="$1"
+ local filename="$2"
+ echo "$user" > "$filename"
+}
+
+user=$(load "datafile.txt")
+
+if [[ -z "$user" ]]; then
+ echo "Hello, I don't believe we have met."
+ read -r user
+ save "$user" "datafile.txt"
+ echo "Nice to meet you $user."
+else
+ echo "It's good to see you again, $user."
+fi
diff --git a/tests/exercises/time-2-code/solution/solution.ts b/tests/exercises/time-2-code/solution/solution.ts
new file mode 100644
index 000000000..7fb5fbe4e
--- /dev/null
+++ b/tests/exercises/time-2-code/solution/solution.ts
@@ -0,0 +1,33 @@
+import * as fs from 'fs';
+import * as readline from 'readline';
+
+const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout
+});
+
+function load(filename: string): string {
+ try {
+ return fs.readFileSync(filename, 'utf8').trim();
+ } catch (err) {
+ return "";
+ }
+}
+
+function save(user: string, filename: string): void {
+ fs.writeFileSync(filename, user + "\n");
+}
+
+let user: string = load("datafile.txt");
+
+if (user === "") {
+ console.log("Hello, I don't believe we have met.");
+ rl.question("", (name: string) => {
+ save(name, "datafile.txt");
+ console.log("Nice to meet you " + name + ".");
+ rl.close();
+ });
+} else {
+ console.log("It's good to see you again, " + user + ".");
+ rl.close();
+}
diff --git a/tests/test_collector.py b/tests/test_collector.py
index 0976c0924..5a86ee7cc 100644
--- a/tests/test_collector.py
+++ b/tests/test_collector.py
@@ -22,12 +22,14 @@
from tested.serialisation import FunctionCall, FunctionType
from tested.testsuite import (
Context,
+ FileOutputChannel,
MainInput,
Output,
Suite,
SupportedLanguage,
Tab,
Testcase,
+ TextData,
)
from tests.manual_utils import assert_valid_output, configuration
@@ -196,3 +198,69 @@ def test_mid_tab_is_completed(tmp_path: Path, pytestconfig: pytest.Config):
"wrong",
"wrong",
]
+
+
+FILE_OUTPUT_SUITE = Suite(
+ tabs=[
+ Tab(
+ name="Tab 1",
+ contexts=[
+ Context(
+ testcases=[
+ Testcase(
+ input=MainInput(arguments=["hello 1"]),
+ output=Output(
+ file=FileOutputChannel(
+ files=[
+ TextData(path="out1.txt", content="expected1"),
+ TextData(path="out2.txt", content="expected2"),
+ ]
+ )
+ ),
+ ),
+ ]
+ ),
+ ],
+ ),
+ ]
+)
+
+
+def test_complete_evaluation_with_file_output(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ """Test that complete_evaluation emits per-file NOT_EXECUTED tests."""
+ conf = configuration(pytestconfig, "", SupportedLanguage.JAVASCRIPT, tmp_path)
+ result = StringIO()
+ bundle = create_bundle(conf, result, FILE_OUTPUT_SUITE)
+ collector = OutputManager(out=result)
+
+ # Open judgement but don't execute anything.
+ collector.add(StartJudgement())
+
+ terminate(bundle, collector, status_if_unclosed=Status.RUNTIME_ERROR)
+
+ updates = assert_valid_output(result.getvalue(), pytestconfig)
+
+ # Each file should produce its own NOT_EXECUTED test.
+ start_tests = updates.find_all("start-test")
+ assert len(start_tests) == 2
+ assert start_tests[0]["channel"] == "out1.txt"
+ assert start_tests[1]["channel"] == "out2.txt"
+
+ assert updates.find_status_enum() == [
+ "runtime error",
+ "wrong",
+ "wrong",
+ ]
+
+
+def test_file_output_channel_rejects_none_path():
+ """Test that FileOutputChannel rejects files with path=None."""
+ with pytest.raises(ValueError, match="File path must be set"):
+ FileOutputChannel(
+ files=[
+ TextData(path=None, content="expected1"),
+ TextData(path="out2.txt", content="expected2"),
+ ]
+ )
diff --git a/tests/test_dsl_legacy.py b/tests/test_dsl_legacy.py
index 7205abaed..fe31d8090 100644
--- a/tests/test_dsl_legacy.py
+++ b/tests/test_dsl_legacy.py
@@ -1,6 +1,6 @@
from tested.dsl.translate_parser import parse_dsl
from tested.serialisation import FunctionCall
-from tested.testsuite import ContentPath
+from tested.testsuite import ContentPath, CustomCheckOracle, FileOutputChannel
def test_dsl_tab_legacy_fields():
@@ -136,3 +136,97 @@ def test_dsl_mixed_files_input_files_priority():
assert len(testcase.input_files) == 1
assert testcase.input_files[0].path == "local.txt"
assert testcase.input_files[0].content == "local content\n"
+
+
+def test_dsl_testcase_output_files_legacy_builtin():
+ yaml_legacy_builtin = """
+- tab: "Legacy Builtin"
+ testcases:
+ - expression: "test()"
+ file:
+ content: "expected.txt"
+ location: "actual.txt"
+ """
+ suite = parse_dsl(yaml_legacy_builtin)
+ file_output = suite.tabs[0].contexts[0].testcases[0].output.file
+ assert isinstance(file_output, FileOutputChannel)
+ assert len(file_output.files) == 1
+ assert file_output.files[0].path == "actual.txt"
+ assert file_output.files[0].content == ContentPath(path="expected.txt")
+
+
+def test_dsl_testcase_output_files_new_builtin():
+ yaml_new_builtin = """
+- tab: "New Builtin"
+ testcases:
+ - expression: "test()"
+ output_files:
+ data:
+ - path: "actual.txt"
+ content: "expected content"
+ oracle: "builtin"
+ """
+ suite = parse_dsl(yaml_new_builtin)
+ file_output = suite.tabs[0].contexts[0].testcases[0].output.file
+ assert isinstance(file_output, FileOutputChannel)
+ assert len(file_output.files) == 1
+ assert file_output.files[0].path == "actual.txt"
+ assert file_output.files[0].content == "expected content\n"
+
+
+def test_dsl_testcase_output_files_simple_array():
+ yaml_simple_array = """
+- tab: "Simple Array"
+ testcases:
+ - expression: "test()"
+ output_files:
+ - path: "file1.txt"
+ content: "content1"
+ - path: "file2.txt"
+ content: "content2"
+ """
+ suite = parse_dsl(yaml_simple_array)
+ file_output = suite.tabs[0].contexts[0].testcases[0].output.file
+ assert isinstance(file_output, FileOutputChannel)
+ assert len(file_output.files) == 2
+ assert file_output.files[0].path == "file1.txt"
+ assert file_output.files[1].path == "file2.txt"
+
+
+def test_dsl_testcase_output_files_legacy_custom():
+ yaml_legacy_custom = """
+- tab: "Legacy Custom"
+ testcases:
+ - expression: "test()"
+ file:
+ oracle: "custom_check"
+ file: "checker.py"
+ content: "expected.txt"
+ location: "actual.txt"
+ """
+ suite = parse_dsl(yaml_legacy_custom)
+ file_output = suite.tabs[0].contexts[0].testcases[0].output.file
+ assert isinstance(file_output, FileOutputChannel)
+ assert isinstance(file_output.oracle, CustomCheckOracle)
+ assert str(file_output.oracle.function.file).endswith("checker.py")
+ assert file_output.files[0].path == "actual.txt"
+
+
+def test_dsl_testcase_output_files_new_custom():
+ yaml_new_custom = """
+- tab: "New Custom"
+ testcases:
+ - expression: "test()"
+ output_files:
+ oracle: "custom_check"
+ file: "checker.py"
+ data:
+ - path: "actual.txt"
+ content: "expected content"
+ """
+ suite = parse_dsl(yaml_new_custom)
+ file_output = suite.tabs[0].contexts[0].testcases[0].output.file
+ assert isinstance(file_output, FileOutputChannel)
+ assert isinstance(file_output.oracle, CustomCheckOracle)
+ assert str(file_output.oracle.function.file).endswith("checker.py")
+ assert file_output.files[0].path == "actual.txt"
diff --git a/tests/test_dsl_yaml.py b/tests/test_dsl_yaml.py
index 3c7109493..0102e4dd5 100644
--- a/tests/test_dsl_yaml.py
+++ b/tests/test_dsl_yaml.py
@@ -770,8 +770,9 @@ def test_file_custom_check_correct():
assert isinstance(test.input, FunctionCall)
assert isinstance(test.output.file, FileOutputChannel)
assert isinstance(test.output.file.oracle, CustomCheckOracle)
- assert test.output.file.actual_path == "test.txt"
- assert test.output.file.expected_path == "test/hallo.txt"
+ assert len(test.output.file.files) == 1
+ assert test.output.file.files[0].content == ContentPath(path="test/hallo.txt")
+ assert test.output.file.files[0].path == "test.txt"
oracle = test.output.file.oracle
assert oracle.function.name == "evaluate_test"
assert oracle.function.file == Path("test.py")
diff --git a/tests/test_file_linking.py b/tests/test_file_linking.py
index 859b59f69..58b1c37cb 100644
--- a/tests/test_file_linking.py
+++ b/tests/test_file_linking.py
@@ -25,6 +25,7 @@ def test_link_files_message_single_file():
]
message = link_files_message(link_files)
+ assert message
assert isinstance(message.message, ExtendedMessage)
assert message.message.format == "html"
assert 'href="path/to/data.txt"' in message.message.description
@@ -40,6 +41,7 @@ def test_link_files_message_multiple_files():
message = link_files_message(link_files)
+ assert message
assert isinstance(message.message, ExtendedMessage)
assert 'href="url1"' in message.message.description
assert "file1.txt" in message.message.description
@@ -56,6 +58,7 @@ def test_link_files_message_inline_content_ignored():
]
message = link_files_message(link_files)
+ assert message
assert isinstance(message.message, ExtendedMessage)
assert 'href="linked-url"' in message.message.description
assert "linked.txt" in message.message.description
@@ -67,8 +70,7 @@ def test_link_files_message_no_path_ignored():
message = link_files_message(link_files)
# If path is None, it should be ignored by link_files_message
- assert isinstance(message.message, ExtendedMessage)
- assert "href=" not in message.message.description
+ assert message is None
def test_readable_input_file_linking(tmp_path: Path, pytestconfig: pytest.Config):
diff --git a/tests/test_io_exercises.py b/tests/test_io_exercises.py
index 94d181f5f..d639db76d 100644
--- a/tests/test_io_exercises.py
+++ b/tests/test_io_exercises.py
@@ -213,3 +213,18 @@ def test_io_function_exercise_haskell_io(
result = execute_config(conf)
updates = assert_valid_output(result, pytestconfig)
assert updates.find_status_enum() == ["correct"]
+
+
+@pytest.mark.parametrize("language", ALL_LANGUAGES)
+def test_file_combinations(language: str, tmp_path: Path, pytestconfig: pytest.Config):
+ conf = configuration(
+ pytestconfig,
+ "time-2-code",
+ language,
+ tmp_path,
+ "plan.yml",
+ "solution",
+ )
+ result = execute_config(conf)
+ updates = assert_valid_output(result, pytestconfig)
+ assert updates.find_status_enum() == ["correct"] * 4
diff --git a/tests/test_oracles_builtin.py b/tests/test_oracles_builtin.py
index 5ea233573..c5baa256e 100644
--- a/tests/test_oracles_builtin.py
+++ b/tests/test_oracles_builtin.py
@@ -11,7 +11,8 @@
from tested.dodona import Status
from tested.oracles.common import OracleConfig
from tested.oracles.exception import evaluate as evaluate_exception
-from tested.oracles.text import evaluate_file, evaluate_text
+from tested.oracles.file import evaluate_file
+from tested.oracles.text import evaluate_text
from tested.oracles.value import evaluate as evaluate_value
from tested.parsing import get_converter
from tested.serialisation import (
@@ -22,11 +23,13 @@
StringType,
)
from tested.testsuite import (
+ ContentPath,
ExceptionOutputChannel,
ExpectedException,
FileOutputChannel,
Suite,
SupportedLanguage,
+ TextData,
TextOutputChannel,
ValueOutputChannel,
)
@@ -140,7 +143,7 @@ def test_file_oracle_full_wrong(
tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture
):
config = oracle_config(tmp_path, pytestconfig, {"mode": "full"})
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected", "actual\nactual"]
@@ -149,20 +152,22 @@ def test_file_oracle_full_wrong(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))],
)
result = evaluate_file(config, channel, "")
s.assert_called_once_with(ANY, "expected\nexpected", "actual\nactual")
- assert result.result.enum == Status.WRONG
- assert result.readable_expected == "expected\nexpected"
- assert result.readable_actual == "actual\nactual"
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.WRONG
+ assert result[0].readable_expected == "expected\nexpected"
+ assert result[0].readable_actual == "actual\nactual"
def test_file_oracle_full_correct(
tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture
):
config = oracle_config(tmp_path, pytestconfig, {"mode": "full"})
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected", "expected\nexpected"]
@@ -171,13 +176,16 @@ def test_file_oracle_full_correct(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))],
)
result = evaluate_file(config, channel, "")
s.assert_called_once_with(ANY, "expected\nexpected", "expected\nexpected")
- assert result.result.enum == Status.CORRECT
- assert result.readable_expected == "expected\nexpected"
- assert result.readable_actual == "expected\nexpected"
+
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected"
+ assert result[0].readable_actual == "expected\nexpected"
def test_file_oracle_line_wrong(
@@ -186,7 +194,7 @@ def test_file_oracle_line_wrong(
config = oracle_config(
tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True}
)
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected2", "actual\nactual2"]
@@ -195,15 +203,18 @@ def test_file_oracle_line_wrong(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))],
)
result = evaluate_file(config, channel, "")
s.assert_any_call(ANY, "expected", "actual")
s.assert_any_call(ANY, "expected2", "actual2")
assert s.call_count == 2
- assert result.result.enum == Status.WRONG
- assert result.readable_expected == "expected\nexpected2"
- assert result.readable_actual == "actual\nactual2"
+
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.WRONG
+ assert result[0].readable_expected == "expected\nexpected2"
+ assert result[0].readable_actual == "actual\nactual2"
def test_file_oracle_line_correct(
@@ -212,7 +223,7 @@ def test_file_oracle_line_correct(
config = oracle_config(
tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True}
)
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected2", "expected\nexpected2"]
@@ -221,15 +232,18 @@ def test_file_oracle_line_correct(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))]
)
result = evaluate_file(config, channel, "")
s.assert_any_call(ANY, "expected", "expected")
s.assert_any_call(ANY, "expected2", "expected2")
assert s.call_count == 2
- assert result.result.enum == Status.CORRECT
- assert result.readable_expected == "expected\nexpected2"
- assert result.readable_actual == "expected\nexpected2"
+
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected2"
+ assert result[0].readable_actual == "expected\nexpected2"
def test_file_oracle_strip_lines_correct(
@@ -238,7 +252,7 @@ def test_file_oracle_strip_lines_correct(
config = oracle_config(
tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True}
)
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected2\n", "expected\nexpected2"]
@@ -247,15 +261,18 @@ def test_file_oracle_strip_lines_correct(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))]
)
result = evaluate_file(config, channel, "")
s.assert_any_call(ANY, "expected", "expected")
s.assert_any_call(ANY, "expected2", "expected2")
assert s.call_count == 2
- assert result.result.enum == Status.CORRECT
- assert result.readable_expected == "expected\nexpected2\n"
- assert result.readable_actual == "expected\nexpected2"
+
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected2\n"
+ assert result[0].readable_actual == "expected\nexpected2"
def test_file_oracle_dont_strip_lines_correct(
@@ -264,7 +281,7 @@ def test_file_oracle_dont_strip_lines_correct(
config = oracle_config(
tmp_path, pytestconfig, {"mode": "line", "stripNewlines": False}
)
- s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
mock_files = [
mocker.mock_open(read_data=content).return_value
for content in ["expected\nexpected2\n", "expected\nexpected2\n"]
@@ -273,15 +290,100 @@ def test_file_oracle_dont_strip_lines_correct(
mock_opener.side_effect = mock_files
mocker.patch("builtins.open", mock_opener)
channel = FileOutputChannel(
- expected_path="expected.txt", actual_path="expected.txt"
+ files=[TextData(path="expected.txt", content=ContentPath(path="expected.txt"))]
)
result = evaluate_file(config, channel, "")
s.assert_any_call(ANY, "expected\n", "expected\n")
s.assert_any_call(ANY, "expected2\n", "expected2\n")
assert s.call_count == 2
- assert result.result.enum == Status.CORRECT
- assert result.readable_expected == "expected\nexpected2\n"
- assert result.readable_actual == "expected\nexpected2\n"
+
+ assert isinstance(result, list)
+ assert len(result) == 1
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected2\n"
+ assert result[0].readable_actual == "expected\nexpected2\n"
+
+
+def test_file_oracle_multiple_files_correct(
+ tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture
+):
+ config = oracle_config(tmp_path, pytestconfig, {"mode": "full"})
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ mock_files = [
+ mocker.mock_open(read_data=content).return_value
+ for content in [
+ "expected\nexpected2\n",
+ "expected\nexpected2\n",
+ "expected\nexpected4\n",
+ "expected\nexpected4\n",
+ ]
+ ]
+ mock_opener = mocker.mock_open()
+ mock_opener.side_effect = mock_files
+ mocker.patch("builtins.open", mock_opener)
+ channel = FileOutputChannel(
+ files=[
+ TextData(path="expected.txt", content=ContentPath(path="expected.txt")),
+ TextData(
+ path="another_file.txt", content=ContentPath(path="another_path.txt")
+ ),
+ ]
+ )
+ result = evaluate_file(config, channel, "")
+ s.assert_any_call(ANY, "expected\nexpected2\n", "expected\nexpected2\n")
+ s.assert_any_call(ANY, "expected\nexpected4\n", "expected\nexpected4\n")
+ assert s.call_count == 2
+
+ assert isinstance(result, list)
+ assert len(result) == 2
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected2\n"
+ assert result[0].readable_actual == "expected\nexpected2\n"
+
+ assert result[1].result.enum == Status.CORRECT
+ assert result[1].readable_expected == "expected\nexpected4\n"
+ assert result[1].readable_actual == "expected\nexpected4\n"
+
+
+def test_file_oracle_multiple_files_one_wrong(
+ tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture
+):
+ config = oracle_config(tmp_path, pytestconfig, {"mode": "full"})
+ s = mocker.spy(tested.oracles.file, name="compare_text") # type: ignore[reportAttributeAccessIssue]
+ mock_files = [
+ mocker.mock_open(read_data=content).return_value
+ for content in [
+ "expected\nexpected2\n",
+ "expected\nexpected2\n",
+ "expected\nexpected4\n",
+ "expected\nexpected3\n",
+ ]
+ ]
+ mock_opener = mocker.mock_open()
+ mock_opener.side_effect = mock_files
+ mocker.patch("builtins.open", mock_opener)
+ channel = FileOutputChannel(
+ files=[
+ TextData(path="expected.txt", content=ContentPath(path="expected.txt")),
+ TextData(
+ path="another_file.txt", content=ContentPath(path="another_path.txt")
+ ),
+ ]
+ )
+ result = evaluate_file(config, channel, "")
+ s.assert_any_call(ANY, "expected\nexpected2\n", "expected\nexpected2\n")
+ s.assert_any_call(ANY, "expected\nexpected4\n", "expected\nexpected3\n")
+ assert s.call_count == 2
+
+ assert isinstance(result, list)
+ assert len(result) == 2
+ assert result[0].result.enum == Status.CORRECT
+ assert result[0].readable_expected == "expected\nexpected2\n"
+ assert result[0].readable_actual == "expected\nexpected2\n"
+
+ assert result[1].result.enum == Status.WRONG
+ assert result[1].readable_expected == "expected\nexpected4\n"
+ assert result[1].readable_actual == "expected\nexpected3\n"
def test_exception_oracle_only_messages_correct(
diff --git a/tests/test_oracles_programmed.py b/tests/test_oracles_programmed.py
index e3d12b5a8..637e5bbe5 100644
--- a/tests/test_oracles_programmed.py
+++ b/tests/test_oracles_programmed.py
@@ -183,3 +183,37 @@ def test_custom_check_function_static_analysis_missing_language(
updates = assert_valid_output(result, pytestconfig)
assert len(updates.find_all("start-testcase")) == 1
assert updates.find_status_enum() == ["internal error"]
+
+
+@pytest.mark.parametrize("lang", OBJECT_LANGUAGES)
+def test_custom_check_function_files(
+ lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+ conf = configuration(
+ pytestconfig,
+ "output-files-custom-oracle",
+ lang,
+ tmp_path,
+ "suite.yaml",
+ "correct",
+ )
+ result = execute_config(conf)
+ updates = assert_valid_output(result, pytestconfig)
+ assert updates.find_status_enum() == ["correct"]
+
+
+@pytest.mark.parametrize("lang", OBJECT_LANGUAGES)
+def test_custom_check_function_files_wrong(
+ lang: str, tmp_path: Path, pytestconfig: pytest.Config
+):
+ conf = configuration(
+ pytestconfig,
+ "output-files-custom-oracle",
+ lang,
+ tmp_path,
+ "suite.yaml",
+ "wrong",
+ )
+ result = execute_config(conf)
+ updates = assert_valid_output(result, pytestconfig)
+ assert updates.find_status_enum() == ["wrong"]
diff --git a/tests/test_planning.py b/tests/test_planning.py
new file mode 100644
index 000000000..3acd0a27c
--- /dev/null
+++ b/tests/test_planning.py
@@ -0,0 +1,509 @@
+import sys
+from pathlib import Path
+
+import pytest
+
+from tested.configs import create_bundle
+from tested.judge.planning import (
+ PlannedContext,
+ PlannedExecutionUnit,
+ PlanStrategy,
+ plan_test_suite,
+)
+from tested.testsuite import (
+ Context,
+ EmptyChannel,
+ ExitCodeOutputChannel,
+ FileOutputChannel,
+ LanguageLiterals,
+ MainInput,
+ Output,
+ Suite,
+ SupportedLanguage,
+ Tab,
+)
+from tested.testsuite import Testcase as SuiteTestcase
+from tested.testsuite import TextData
+from tests.manual_utils import configuration
+
+
+def test_planned_execution_unit_get_stdin():
+ resources = Path("resources")
+ tc1 = SuiteTestcase(
+ input=MainInput(stdin=TextData(content="stdin1")), input_files=[]
+ )
+ tc2 = SuiteTestcase(
+ input=MainInput(stdin=TextData(content="stdin2")), input_files=[]
+ )
+ tc3 = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+
+ pc1 = PlannedContext(context=Context(testcases=[tc1]), tab_index=0, context_index=0)
+ pc2 = PlannedContext(context=Context(testcases=[tc2]), tab_index=0, context_index=1)
+ pc3 = PlannedContext(context=Context(testcases=[tc3]), tab_index=0, context_index=2)
+
+ unit = PlannedExecutionUnit(contexts=[pc1, pc2, pc3], name="unit", index=0)
+ # tc1 has stdin1, tc2 has stdin2, tc3 has no stdin.
+ assert unit.get_stdin(resources) == "stdin1stdin2"
+
+
+def test_planned_execution_unit_has_main_testcase():
+ tc_main = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ tc_not_main = SuiteTestcase(
+ input=LanguageLiterals(literals={SupportedLanguage.PYTHON: "5"}), input_files=[]
+ )
+
+ pc_main = PlannedContext(
+ context=Context(testcases=[tc_main]), tab_index=0, context_index=0
+ )
+ pc_not_main = PlannedContext(
+ context=Context(testcases=[tc_not_main]), tab_index=0, context_index=1
+ )
+
+ unit_with_main = PlannedExecutionUnit(contexts=[pc_main], name="unit1", index=0)
+ assert unit_with_main.has_main_testcase() is True
+
+ unit_without_main = PlannedExecutionUnit(
+ contexts=[pc_not_main], name="unit2", index=1
+ )
+ assert unit_without_main.has_main_testcase() is False
+
+
+def test_planned_execution_unit_has_exit_testcase():
+ tc_exit = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(exit_code=ExitCodeOutputChannel(value=0)),
+ input_files=[],
+ )
+ tc_no_exit = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+
+ pc_exit = PlannedContext(
+ context=Context(testcases=[tc_exit]), tab_index=0, context_index=0
+ )
+ pc_no_exit = PlannedContext(
+ context=Context(testcases=[tc_no_exit]), tab_index=0, context_index=1
+ )
+
+ unit_with_exit = PlannedExecutionUnit(contexts=[pc_exit], name="unit1", index=0)
+ assert unit_with_exit.has_exit_testcase() is True
+
+ unit_without_exit = PlannedExecutionUnit(
+ contexts=[pc_no_exit], name="unit2", index=1
+ )
+ assert unit_without_exit.has_exit_testcase() is False
+
+
+def test_plan_strategy_optimal(tmp_path: Path, pytestconfig: pytest.Config):
+ tc = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ ctx1 = Context(testcases=[tc])
+ ctx2 = Context(testcases=[tc])
+ tab1 = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab1])
+
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 1
+ assert len(units[0].contexts) == 2
+ assert units[0].name == "execution_0"
+
+
+def test_plan_strategy_optimal_multiple_tabs(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ tc = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ ctx1 = Context(testcases=[tc])
+ ctx2 = Context(testcases=[tc])
+ tab1 = Tab(name="tab1", contexts=[ctx1])
+ tab2 = Tab(name="tab2", contexts=[ctx2])
+ suite = Suite(tabs=[tab1, tab2])
+
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ # Should be 1 unit because there are no conflicts
+ assert len(units) == 1
+ assert len(units[0].contexts) == 2
+ assert units[0].contexts[0].tab_index == 0
+ assert units[0].contexts[1].tab_index == 1
+
+
+def test_plan_strategy_tab(tmp_path: Path, pytestconfig: pytest.Config):
+ tc = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ ctx1 = Context(testcases=[tc])
+ ctx2 = Context(testcases=[tc])
+ tab1 = Tab(name="tab1", contexts=[ctx1])
+ tab2 = Tab(name="tab2", contexts=[ctx2])
+ suite = Suite(tabs=[tab1, tab2])
+
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.TAB)
+
+ assert len(units) == 2
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
+ assert units[0].name == "execution_0"
+ assert units[1].name == "execution_1"
+
+
+def test_plan_strategy_context(tmp_path: Path, pytestconfig: pytest.Config):
+ tc = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ ctx1 = Context(testcases=[tc])
+ ctx2 = Context(testcases=[tc])
+ tab1 = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab1])
+
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.CONTEXT)
+
+ assert len(units) == 2
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
+
+
+def test_planning_conflict_input_files(tmp_path: Path, pytestconfig: pytest.Config):
+ # Same path, different content
+ file1 = TextData(path="file.txt", content="content1")
+ file2 = TextData(path="file.txt", content="content2")
+
+ ctx1 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+ ]
+ )
+ ctx2 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file2])
+ ]
+ )
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+
+
+def test_planning_no_conflict_input_files(tmp_path: Path, pytestconfig: pytest.Config):
+ # Same path, same content
+ file1 = TextData(path="file.txt", content="content1")
+ file2 = TextData(path="file.txt", content="content1")
+
+ ctx1 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+ ]
+ )
+ ctx2 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file2])
+ ]
+ )
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 1
+
+
+def test_planning_conflict_output_files(tmp_path: Path, pytestconfig: pytest.Config):
+ # Same output path
+ file1 = TextData(path="out.txt", content="content1")
+ file2 = TextData(path="out.txt", content="content2")
+
+ tc1 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(file=FileOutputChannel(files=[file1])),
+ )
+ tc2 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(file=FileOutputChannel(files=[file2])),
+ )
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+
+
+def test_planning_conflict_stdin(tmp_path: Path, pytestconfig: pytest.Config):
+ # Context with stdin should start a new unit
+ tc1 = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ tc2 = SuiteTestcase(
+ input=MainInput(stdin=TextData(content="stdin")), input_files=[]
+ )
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
+ assert units[1].contexts[0].context == ctx2
+
+
+def test_planning_conflict_exit_code(tmp_path: Path, pytestconfig: pytest.Config):
+ # Context with exit code check should end the unit
+ tc1 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(exit_code=ExitCodeOutputChannel(value=0)),
+ )
+ tc2 = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE))
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
+ assert units[0].contexts[0].context == ctx1
+
+
+def test_plan_empty_suite(tmp_path: Path, pytestconfig: pytest.Config):
+ suite = Suite(tabs=[])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+ assert len(units) == 0
+
+
+def test_planning_stdin_no_split_after(tmp_path: Path, pytestconfig: pytest.Config):
+ # [Stdin, NoStdin] -> 1 unit (no split because the second one has no stdin)
+ tc1 = SuiteTestcase(input=MainInput(stdin=TextData(content="stdin")))
+ tc2 = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE))
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 1
+ assert len(units[0].contexts) == 2
+
+
+def test_planning_exit_code_no_split_before(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ # [NoExitCode, ExitCode] -> 1 unit (no split because the first one has no exit code, and the second one ends the unit)
+ tc1 = SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE))
+ tc2 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(exit_code=ExitCodeOutputChannel(value=0)),
+ )
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 1
+ assert len(units[0].contexts) == 2
+
+
+def test_planning_multiple_input_files_conflict(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ # Context with multiple input files, one of which conflicts
+ file1 = TextData(path="file1.txt", content="content1")
+ file2 = TextData(path="file2.txt", content="content2")
+ file1_alt = TextData(path="file1.txt", content="content1_alt")
+
+ ctx1 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1, file2]
+ )
+ ]
+ )
+ ctx2 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1_alt]
+ )
+ ]
+ )
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+
+
+def test_planning_multiple_output_files_conflict(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ # Context with multiple output files, one of which conflicts
+ out1 = TextData(path="out1.txt", content="content1")
+ out2 = TextData(path="out2.txt", content="content2")
+ out1_alt = TextData(path="out1.txt", content="content1_alt")
+
+ tc1 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(file=FileOutputChannel(files=[out1, out2])),
+ )
+ tc2 = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(file=FileOutputChannel(files=[out1_alt])),
+ )
+
+ ctx1 = Context(testcases=[tc1])
+ ctx2 = Context(testcases=[tc2])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+
+
+def test_planning_state_cleared_after_split(
+ tmp_path: Path, pytestconfig: pytest.Config
+):
+ # Verify that file tracking is cleared after a split
+ file1 = TextData(path="file1.txt", content="content1")
+ file1_alt = TextData(path="file1.txt", content="content1_alt")
+
+    # ctx1/ctx2 split because ctx2 reads stdin; ctx2/ctx3 split again on file1
+ ctx1 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+ ]
+ )
+ ctx2 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=TextData(content="stdin")),
+ input_files=[file1_alt],
+ )
+ ]
+ )
+ ctx3 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+ ]
+ )
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2, ctx3])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 3
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
+ assert len(units[2].contexts) == 1
+
+
+def test_planning_mixed_conflicts(tmp_path: Path, pytestconfig: pytest.Config):
+ file1 = TextData(path="file1.txt", content="content1")
+ file1_alt = TextData(path="file1.txt", content="content2")
+ out2 = TextData(path="file2.txt", content="out")
+
+ ctx1 = Context(
+ testcases=[
+ SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+ ]
+ )
+ ctx2 = Context(
+ testcases=[SuiteTestcase(input=MainInput(stdin=TextData(content="stdin")))]
+ )
+ ctx3 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(file=FileOutputChannel(files=[out2])),
+ )
+ ]
+ )
+ ctx4 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1_alt]
+ )
+ ]
+ )
+ ctx5 = Context(
+ testcases=[
+ SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(exit_code=ExitCodeOutputChannel(value=0)),
+ )
+ ]
+ )
+ ctx6 = Context(testcases=[SuiteTestcase(input=MainInput(stdin=EmptyChannel.NONE))])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2, ctx3, ctx4, ctx5, ctx6])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 3
+ assert [len(u.contexts) for u in units] == [1, 4, 1]
+ assert units[0].contexts[0].context == ctx1
+ assert units[1].contexts[0].context == ctx2
+ assert units[1].contexts[1].context == ctx3
+ assert units[1].contexts[2].context == ctx4
+ assert units[1].contexts[3].context == ctx5
+ assert units[2].contexts[0].context == ctx6
+
+
+def test_planning_sequential_exit_codes(tmp_path: Path, pytestconfig: pytest.Config):
+ # [ExitCode, ExitCode] -> 2 units
+ tc = SuiteTestcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ output=Output(exit_code=ExitCodeOutputChannel(value=0)),
+ )
+ ctx1 = Context(testcases=[tc])
+ ctx2 = Context(testcases=[tc])
+
+ tab = Tab(name="tab1", contexts=[ctx1, ctx2])
+ suite = Suite(tabs=[tab])
+ conf = configuration(pytestconfig, "echo-function", "python", tmp_path)
+ bundle = create_bundle(conf, sys.stdout, suite)
+ units = plan_test_suite(bundle, PlanStrategy.OPTIMAL)
+
+ assert len(units) == 2
+ assert len(units[0].contexts) == 1
+ assert len(units[1].contexts) == 1
diff --git a/tests/test_planning_sorting.py b/tests/test_planning_sorting.py
index 89d89f97b..3e2eb02ad 100644
--- a/tests/test_planning_sorting.py
+++ b/tests/test_planning_sorting.py
@@ -60,3 +60,120 @@ def test_get_dynamically_generated_files_stdin_sorting():
assert generated_files[0].content.path == "url1"
assert isinstance(generated_files[1].content, ContentPath)
assert generated_files[1].content.path == "url2"
+
+
+def test_get_dynamically_generated_files_empty():
+    """get_dynamically_generated_files() on a unit with no contexts returns []."""
+ unit = PlannedExecutionUnit(contexts=[], name="empty_unit", index=0)
+ assert unit.get_dynamically_generated_files() == []
+
+
+def test_get_dynamically_generated_files_no_files():
+ """Contexts with testcases but no input files or stdin should return empty."""
+ testcase = Testcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[])
+ context = Context(testcases=[testcase])
+ planned = PlannedContext(context=context, tab_index=0, context_index=0)
+
+ unit = PlannedExecutionUnit(contexts=[planned], name="no_files_unit", index=0)
+ assert unit.get_dynamically_generated_files() == []
+
+
+def test_get_dynamically_generated_files_not_dynamic():
+ """
+ Files where the `path` matches the `ContentPath.path` are considered static
+ by `TextData.is_dynamically_generated()` and should be ignored.
+ """
+ # static.txt maps exactly to the content path static.txt
+ static_file = TextData(path="static.txt", content=ContentPath(path="static.txt"))
+
+ testcase = Testcase(input=MainInput(stdin=static_file), input_files=[static_file])
+ context = Context(testcases=[testcase])
+ planned = PlannedContext(context=context, tab_index=0, context_index=0)
+
+ unit = PlannedExecutionUnit(contexts=[planned], name="static_unit", index=0)
+ assert unit.get_dynamically_generated_files() == []
+
+
+def test_get_dynamically_generated_files_deduplication():
+ file1 = TextData(path="data.txt", content="shared content")
+
+ # Same file used in stdin and input_files
+ testcase1 = Testcase(input=MainInput(stdin=file1), input_files=[file1])
+ # Same file used in a completely different context
+ testcase2 = Testcase(input=MainInput(stdin=EmptyChannel.NONE), input_files=[file1])
+
+ planned1 = PlannedContext(
+ context=Context(testcases=[testcase1]), tab_index=0, context_index=0
+ )
+ planned2 = PlannedContext(
+ context=Context(testcases=[testcase2]), tab_index=0, context_index=1
+ )
+
+ unit = PlannedExecutionUnit(
+ contexts=[planned1, planned2], name="dedup_unit", index=0
+ )
+
+ generated_files = unit.get_dynamically_generated_files()
+ assert len(generated_files) == 1
+ assert generated_files[0].path == "data.txt"
+ assert generated_files[0].content == "shared content"
+
+
+def test_get_dynamically_generated_files_complex_sorting():
+ # Group A: Same path "a.txt", different contents to test secondary sort
+ file_a1 = TextData(path="a.txt", content=ContentPath(path="z_url"))
+ file_a2 = TextData(path="a.txt", content="a_string")
+ file_a3 = TextData(path="a.txt", content=ContentPath(path="m_url"))
+
+ # Group B: Different path "b.txt" to test primary sort
+ file_b1 = TextData(path="b.txt", content="b_string")
+
+ testcase = Testcase(
+ input=MainInput(stdin=EmptyChannel.NONE),
+ input_files=[file_b1, file_a1, file_a3, file_a2],
+ )
+ planned = PlannedContext(
+ context=Context(testcases=[testcase]), tab_index=0, context_index=0
+ )
+ unit = PlannedExecutionUnit(contexts=[planned], name="sort_unit", index=0)
+
+ generated = unit.get_dynamically_generated_files()
+
+ assert len(generated) == 4
+
+ # Expected order:
+ # 1. a.txt -> "a_string"
+ # 2. a.txt -> m_url
+ # 3. a.txt -> z_url
+ # 4. b.txt -> "b_string"
+
+ assert generated[0].path == "a.txt"
+ assert generated[0].content == "a_string"
+
+ assert generated[1].path == "a.txt"
+ assert isinstance(generated[1].content, ContentPath)
+ assert generated[1].content.path == "m_url"
+
+ assert generated[2].path == "a.txt"
+ assert isinstance(generated[2].content, ContentPath)
+ assert generated[2].content.path == "z_url"
+
+ assert generated[3].path == "b.txt"
+ assert generated[3].content == "b_string"
+
+
+def test_get_dynamically_generated_files_none_path():
+ """
+ If a file has no path assigned (e.g., stdout expectations),
+ it should not be considered a dynamically generated input file.
+ """
+ # Path is None
+ file_no_path = TextData(path=None, content="content")
+
+ testcase = Testcase(input=MainInput(stdin=file_no_path), input_files=[file_no_path])
+ planned = PlannedContext(
+ context=Context(testcases=[testcase]), tab_index=0, context_index=0
+ )
+ unit = PlannedExecutionUnit(contexts=[planned], name="no_path_unit", index=0)
+
+ assert unit.get_dynamically_generated_files() == []
diff --git a/tests/test_suite.py b/tests/test_suite.py
index a9dd4100e..5e7ea94cd 100644
--- a/tests/test_suite.py
+++ b/tests/test_suite.py
@@ -5,6 +5,7 @@
from tested.parsing import get_converter
from tested.testsuite import (
+ ContentPath,
CustomCheckOracle,
ExceptionOutputChannel,
ExitCodeOutputChannel,
@@ -108,13 +109,13 @@ def test_file_show_expected_is_accepted():
scheme = """
{
"show_expected": true,
- "expected_path": "hallo",
- "actual_path": "hallo"
+ "expected_path": "expected-hallo",
+ "actual_path": "actual-hallo"
}
"""
result = get_converter().loads(scheme, FileOutputChannel)
- assert result.expected_path == "hallo"
- assert result.actual_path == "hallo"
+ assert result.files[0].path == "actual-hallo"
+ assert result.files[0].content == ContentPath(path="expected-hallo")
def test_value_show_expected_is_accepted():
diff --git a/tests/test_testsuite_legacy.py b/tests/test_testsuite_legacy.py
index acc02b0ef..0f93ceb77 100644
--- a/tests/test_testsuite_legacy.py
+++ b/tests/test_testsuite_legacy.py
@@ -66,8 +66,8 @@ def test_file_output_channel_legacy_evaluator():
"evaluator": {"name": "file"},
}
result = get_converter().structure(data, FileOutputChannel)
- assert result.expected_path == "exp.txt"
- assert result.actual_path == "act.txt"
+ assert result.files[0].content == ContentPath(path="exp.txt")
+ assert result.files[0].path == "act.txt"
assert isinstance(result.oracle, GenericTextOracle)
assert result.oracle.name == TextBuiltin.FILE