Changelog.md: 1 addition, 0 deletions
@@ -8,6 +8,7 @@ All notable changes to this project will be documented here.
- Update "setting up test environment" message with http response of status code 503 (#589)
- Change rlimit resource settings to apply each worker individually (#587)
- Improve error reporting with handled assertion errors (#591)
- Add custom pytest markers to Python tester to record MarkUs metadata (#592)

## [v2.6.0]
- Update python versions in docker file (#568)
server/autotest_server/testers/py/py_tester.py: 62 additions, 0 deletions
@@ -74,6 +74,24 @@ def __init__(self) -> None:
Initialize a pytest plugin for collecting results
"""
self.results = {}
self.tags = set()
self.annotations = []
self.overall_comments = []

def pytest_configure(self, config):
"""Register custom markers for use with MarkUs."""
config.addinivalue_line("markers", "markus_tag(name): indicate that the submission should be given a tag")
config.addinivalue_line(
"markers", "markus_annotation(**ann_data): indicate that the submission should be given an annotation"
)
config.addinivalue_line(
"markers",
"markus_overall_comments(comment): indicate that the submission should be given an overall comment",
)
config.addinivalue_line(
"markers",
"markus_message(text): indicate text that is displayed as part of the test output (even on success)",
)
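For illustration, a test module using these markers might look like the sketch below. The marker names match those registered above; the annotation keyword arguments (filename, content, line_start, line_end) are hypothetical, since the plugin forwards marker.kwargs to MarkUs unchanged and this diff does not specify which keys MarkUs expects.

import pytest

# Hypothetical example: each marker attaches MarkUs metadata to this test.
@pytest.mark.markus_tag(name="recursion")
@pytest.mark.markus_annotation(filename="submission.py", content="Handle the empty list.", line_start=1, line_end=2)
@pytest.mark.markus_overall_comments("Good overall structure.")
@pytest.mark.markus_message("Shown in the test output even on success.")
def test_sum():
    assert sum([1, 2, 3]) == 6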

@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(self, item, call):
@@ -96,8 +114,37 @@ def pytest_runtest_makereport(self, item, call):
"errors": str(rep.longrepr) if rep.failed else "",
"description": item.obj.__doc__,
}

# Only check markers at the end of the test case
if not rep.skipped and rep.when == "teardown":
self._process_markers(item)

return rep

def _process_markers(self, item):
"""Process all markers for the given item.

This looks for custom markers used to represent test metadata for MarkUs.
"""
for marker in item.iter_markers():
if marker.name == "markus_tag":
if len(marker.args) > 0:
self.tags.add(marker.args[0].strip())
elif "name" in marker.kwargs:
self.tags.add(marker.kwargs["name"].strip())
elif marker.name == "markus_annotation":
self.annotations.append(marker.kwargs)
elif marker.name == "markus_overall_comments":
if len(marker.args) > 0:
self.overall_comments.append(marker.args[0])
elif "comment" in marker.kwargs:
self.overall_comments.append(marker.kwargs["comment"])
elif marker.name == "markus_message" and marker.args != [] and item.nodeid in self.results:
if self.results[item.nodeid].get("errors"):
self.results[item.nodeid]["errors"] += f"\n\n{marker.args[0]}"
else:
self.results[item.nodeid]["errors"] = marker.args[0]

def pytest_collectreport(self, report):
"""
Implement a pytest hook that is run after the collector has
@@ -170,6 +217,9 @@ def __init__(
This tester will create tests of type test_class.
"""
super().__init__(specs, test_class, resource_settings=resource_settings)
self.annotations = []
self.overall_comments = []
self.tags = set()

@staticmethod
def _load_unittest_tests(test_file: str) -> unittest.TestSuite:
@@ -210,6 +260,9 @@ def _run_pytest_tests(self, test_file: str) -> List[Dict]:
plugin = PytestPlugin()
pytest.main([test_file, f"--tb={verbosity}"], plugins=[plugin])
results.extend(plugin.results.values())
self.annotations = plugin.annotations
self.overall_comments = plugin.overall_comments
self.tags = plugin.tags
finally:
sys.stdout = sys.__stdout__
return results
@@ -237,3 +290,12 @@ def run(self) -> None:
for res in result:
test = self.test_class(self, test_file, res)
print(test.run(), flush=True)

def after_tester_run(self) -> None:
"""Print all MarkUs metadata from the tests."""
if self.annotations:
print(self.test_class.format_annotations(self.annotations))
if self.tags:
print(self.test_class.format_tags(self.tags))
if self.overall_comments:
print(self.test_class.format_overall_comment(self.overall_comments, separator="\n\n"))
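Combined with the format_* helpers added to tester.py below, after_tester_run emits one JSON object per line on stdout. For the hypothetical markers sketched earlier, the output would resemble:

{"annotations": [{"filename": "submission.py", "content": "Handle the empty list.", "line_start": 1, "line_end": 2}]}
{"tags": [{"name": "recursion"}]}
{"overall_comment": "Good overall structure."}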
server/autotest_server/testers/tester.py: 33 additions, 1 deletion
@@ -1,7 +1,9 @@
from __future__ import annotations

import json
from abc import ABC, abstractmethod
from functools import wraps
-from typing import Optional, Callable, Any, Type, Dict, List
+from typing import Optional, Callable, Any, Type, Dict, Iterable, List
from .specs import TestSpecs
import traceback
import resource
@@ -99,6 +101,36 @@ def format_annotations(annotation_data: List[Dict[str, Any]]) -> str:
"""
return json.dumps({"annotations": annotation_data})

@staticmethod
def format_overall_comment(overall_comment_data: str | Iterable[str], separator: str = "\n\n") -> str:
"""
Formats overall comment data.
:param overall_comment_data: the contents of the overall comment
:param separator: if overall_comment_data is a collection, use separator to join the elements
:return a json string representation of the overall comment data.
"""
if isinstance(overall_comment_data, str):
content = overall_comment_data
else:
content = separator.join(overall_comment_data)
return json.dumps({"overall_comment": content})

@staticmethod
def format_tags(tag_data: Iterable[str | dict[str, str]]) -> str:
"""
Formats tag data.
:param tag_data: an iterable of tag data. Each element is either a tag name (str) or a dictionary with
keys "name" and "description".
:return a json string representation of the tag data.
"""
tag_list = []
for tag in tag_data:
if isinstance(tag, str):
tag_list.append({"name": tag})
else:
tag_list.append(tag)
return json.dumps({"tags": tag_list})

def passed_with_bonus(self, points_bonus: int, message: str = "") -> str:
"""
Passes this test earning bonus points in addition to the test total points. If a feedback file is enabled, adds