
Commit 02ba0e1

Updated R tester to support MarkUs test metadata
1 parent a903ce5 commit 02ba0e1

4 files changed (+68, -12 lines)


Changelog.md

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ All notable changes to this project will be documented here.
 ## [unreleased]
 - Update Python and Jupyter test names to follow `[file] class.funcname` format (#605)
 - Update r tester test_name formatting and add r tester tests (#606)
+- Update R tester to support MarkUs metadata (#615)

 ## [v2.7.0]
 - Update python, pyta and jupyter testers to allow a requirements file (#580)

server/autotest_server/testers/r/lib/r_tester.R

Lines changed: 25 additions & 1 deletion
@@ -3,6 +3,10 @@ library(testthat)
 library(rjson)
 args <- commandArgs(TRUE)
 test_results <- testthat::test_file(args[1], reporter = testthat::ListReporter)
+annotations <- list()
+tags <- list()
+overall_comments <- list()
+
 for (i in 1:length(test_results)) {
   for (j in 1:length(test_results[[i]]$results)) {
     result <- test_results[[i]]$results[[j]]
@@ -15,8 +19,28 @@ for (i in 1:length(test_results)) {
     if (!is.null(test_results[[i]]$results[[j]]$trace)) {
       test_results[[i]]$results[[j]]$trace <- format(test_results[[i]]$results[[j]]$trace)
     }
+
+    # Check result for MarkUs metadata
+    if ("markus_tag" %in% names(attributes(result))) {
+      tags <- append(tags, attr(result, "markus_tag"))
+      test_results[[i]]$results[[j]]$type <- "metadata"
+    }
+    if ("markus_annotation" %in% names(attributes(result))) {
+      annotations <- append(annotations, list(attr(result, "markus_annotation")))
+      test_results[[i]]$results[[j]]$type <- "metadata"
+    }
+    if ("markus_overall_comments" %in% names(attributes(result))) {
+      overall_comments <- append(overall_comments, attr(result, "markus_overall_comments"))
+      test_results[[i]]$results[[j]]$type <- "metadata"
+    }
   }
 }
-json <- rjson::toJSON(test_results)
+
+json <- rjson::toJSON(list(
+  test_results = test_results,
+  tags = tags,
+  annotations = annotations,
+  overall_comments = overall_comments
+))
 sink()
 cat(json)
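
For reference, the new metadata checks use only base-R attributes on each testthat result object. A standalone sketch of that lookup (not part of this commit; the fake result object and the "q1" tag value below are illustrative only):

# Sketch: a fake result carrying a hypothetical markus_tag attribute.
result <- structure(
  list(type = "expectation_success", message = ""),
  markus_tag = "q1"
)

tags <- list()
if ("markus_tag" %in% names(attributes(result))) {
  tags <- append(tags, attr(result, "markus_tag"))
}
str(tags)
#> List of 1
#>  $ : chr "q1"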

server/autotest_server/testers/r/r_tester.py

Lines changed: 33 additions & 4 deletions
@@ -51,6 +51,10 @@ def run(self):
         successes = 0
         error = False
         for result in self.result:
+            # Skip results that were only used to specify MarkUs metadata
+            if result["type"] == "metadata":
+                continue
+
             # Only add message if not a success, as testthat reports failure messages only
             if result["type"] != "expectation_success":
                 messages.append(result["message"])
@@ -89,6 +93,9 @@ def __init__(
         This tester will create tests of type test_class.
         """
         super().__init__(specs, test_class, resource_settings=resource_settings)
+        self.annotations = []
+        self.overall_comments = []
+        self.tags = set()

     def run_r_tests(self) -> Dict[str, List[Dict[str, Union[int, str]]]]:
         """
@@ -108,7 +115,11 @@ def run_r_tests(self) -> Dict[str, List[Dict[str, Union[int, str]]]]:
             if not results.get(test_file):
                 results[test_file] = []
             if proc.returncode == 0:
-                results[test_file].extend(json.loads(proc.stdout))
+                test_data = json.loads(proc.stdout)
+                results[test_file].extend(test_data.get("test_results", []))
+                self.annotations.extend(test_data.get("annotations", []))
+                self.tags.update(test_data.get("tags", []))
+                self.overall_comments.extend(test_data.get("overall_comments", []))
             else:
                 raise TestError(proc.stderr)
         return results
@@ -122,7 +133,25 @@ def run(self) -> None:
             results = self.run_r_tests()
         except subprocess.CalledProcessError as e:
             raise TestError(e.stderr) from e
-        for test_file, result in results.items():
-            for res in result:
-                test = self.test_class(self, test_file, res)
+        for test_file, test_results in results.items():
+            for result in test_results:
+                test = self.test_class(self, test_file, result)
                 print(test.run(), flush=True)
+
+    def after_tester_run(self) -> None:
+        """Print all MarkUs metadata from the tests."""
+        if self.annotations:
+            import sys
+
+            print(self.annotations, file=sys.stderr)
+            print(self.test_class.format_annotations(self.annotations))
+        if self.tags:
+            import sys
+
+            print(self.tags, file=sys.stderr)
+            print(self.test_class.format_tags(self.tags))
+        if self.overall_comments:
+            import sys
+
+            print(self.overall_comments, file=sys.stderr)
+            print(self.test_class.format_overall_comment(self.overall_comments, separator="\n\n"))
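
The updated run_r_tests() assumes the R script's stdout is a single JSON object keyed by test_results, tags, annotations, and overall_comments. A standalone sketch of that payload, built with rjson the same way lib/r_tester.R does (values are illustrative, not real MarkUs output):

library(rjson)

json <- rjson::toJSON(list(
  test_results = list(list(
    context = "Basic arithmetic",
    test = "addition works correctly",
    results = list(list(type = "expectation_success", message = ""))
  )),
  tags = list("q1"),                           # collected from markus_tag attributes
  annotations = list(),                        # collected from markus_annotation attributes
  overall_comments = list("Good work overall.")  # collected from markus_overall_comments
))
cat(json)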

server/autotest_server/tests/testers/r/test_r_tester.py

Lines changed: 9 additions & 7 deletions
@@ -9,13 +9,15 @@ def test_success_with_context(request, monkeypatch):
     monkeypatch.chdir(request.fspath.dirname)

     # Mock R test results - simulates what R would return as JSON
-    mock_r_output = [
-        {
-            "context": "Basic arithmetic",
-            "test": "addition works correctly",
-            "results": [{"type": "expectation_success", "message": ""}],
-        }
-    ]
+    mock_r_output = {
+        "test_results": [
+            {
+                "context": "Basic arithmetic",
+                "test": "addition works correctly",
+                "results": [{"type": "expectation_success", "message": ""}],
+            }
+        ]
+    }

     # Mock subprocess.run to return our simulated R output
     mock_process = MagicMock()
