14 changes: 12 additions & 2 deletions bin/generate.py
@@ -705,7 +705,7 @@ def validate_ans_and_out(
infile = problem.tmpdir / "data" / t.hash / "testcase.in"
assert infile.is_file()

if testcase.root == "invalid_input":
if testcase.root in ["invalid_input", "testing_tool_test"]:
return True

ansfile = infile.with_suffix(".ans")
@@ -939,7 +939,11 @@ def generate_from_rule():
def generate_from_solution(testcase: Testcase, bar: ProgressBar):
nonlocal meta_yaml

if testcase.root in [*config.INVALID_CASE_DIRECTORIES, "valid_output"]:
if testcase.root in [
*config.INVALID_CASE_DIRECTORIES,
"valid_output",
"testing_tool_test",
]:
return True
if config.args.no_solution:
return True
@@ -1021,6 +1025,8 @@ def generate_visualization(testcase: Testcase, bar: ProgressBar):

if testcase.root in config.INVALID_CASE_DIRECTORIES:
return True
if testcase.root == "testing_tool_test":
return True
if config.args.no_visualizer:
return True

@@ -1182,6 +1188,7 @@ def add_test_case_to_cache():

# consider specific files for the uniqueness of this testcase
relevant_files = {
"testing_tool_test": [".in"],
"invalid_input": [".in"],
"invalid_answer": [".in", ".ans"],
"invalid_output": [".in", ".ans", ".out"],
@@ -1747,6 +1754,7 @@ def parse(key: str, name_gen: Callable[[], str], yaml: dict, parent: AnyDirector
"invalid_answer",
"invalid_input",
"valid_output",
"testing_tool_test",
]
keys = dictionary.keys()
if isinstance(parent, RootDirectory):
@@ -2155,6 +2163,8 @@ def reorder(self):
warn(f"{d} is used for invalid test data. Skipping.")
elif parts[0] == "valid_output":
warn(f"{d} is used for valid test data. Skipping.")
elif parts[0] == "testing_tool_test":
warn(f"{d} is used to test the testing tool. Skipping.")
elif path not in self.known_directories:
warn(f"{d} is not a generated directory. Skipping.")
elif not self.known_directories[path].numbered:
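
The relevant_files mapping added in the hunk above controls which files feed into a testcase's uniqueness hash; for testing_tool_test, as for invalid_input, only the .in file is relevant. The following standalone sketch illustrates the idea; compute_testcase_key is a hypothetical helper, not code from this PR, and the fallback extension list is an assumption.

import hashlib
from pathlib import Path

# Only the listed extensions contribute to a testcase's uniqueness key;
# the fallback list for regular testcases is an assumption for this sketch.
RELEVANT_FILES = {
    "testing_tool_test": [".in"],
    "invalid_input": [".in"],
    "invalid_answer": [".in", ".ans"],
    "invalid_output": [".in", ".ans", ".out"],
}

def compute_testcase_key(root: str, infile: Path) -> str:
    h = hashlib.sha256()
    for ext in RELEVANT_FILES.get(root, [".in", ".ans"]):
        f = infile.with_suffix(ext)
        if f.is_file():
            h.update(f.read_bytes())
    return h.hexdigest()
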
68 changes: 51 additions & 17 deletions bin/problem.py
@@ -17,6 +17,7 @@
import parallel
import run
import testcase
import testing_tool
import validate
import validator_tests
import verdicts
@@ -351,7 +352,7 @@ def __init__(self, path: Path, tmpdir: Path, label: Optional[str] = None):

# Some caches.
self._testcases = dict[
tuple[Optional[validate.Mode], bool, bool], list[testcase.Testcase]
tuple[Optional[validate.Mode], bool, bool, bool], list[testcase.Testcase]
]()
self._submissions: Optional[list[run.Submission] | Literal[False]] = None
self._validators_cache = dict[ # The "bool" is for "check_constraints"
@@ -629,17 +630,17 @@ def testcases(
mode: Optional[validate.Mode] = None,
needans=True,
only_samples=False,
testing_tool=False,
) -> Sequence[testcase.Testcase]:
only_samples = config.args.samples or only_samples

key = (mode, needans, only_samples)
key = (mode, needans, only_samples, testing_tool)
if key in p._testcases is not None:
return p._testcases[key]

in_paths = None
if config.args.testcases:
if only_samples:
assert False
assert not only_samples
# Deduplicate testcases with both .in and .ans.
in_paths = []
for t in config.args.testcases:
@@ -654,6 +655,8 @@ def testcases(

in_paths = list(set(in_paths))
elif mode is not None:
assert not only_samples
assert not testing_tool
assert needans
in_paths = []
for prefix in {
@@ -663,6 +666,8 @@ def testcases(
validate.Mode.VALID_OUTPUT: ["secret", "sample", "valid_output"],
}[mode]:
in_paths += glob(p.path, f"data/{prefix}/**/*.in")
elif testing_tool:
in_paths = list(glob(p.path, "data/testing_tool_test/**/*.in"))
else:
in_paths = list(glob(p.path, "data/sample/**/*.in"))
if not only_samples:
@@ -702,7 +707,7 @@ def testcases(
testcases.append(t)
testcases.sort(key=lambda t: t.name)

if len(testcases) == 0:
if len(testcases) == 0 and not testing_tool:
ans = (
" with answer"
if needans and mode not in [validate.Mode.INVALID, validate.Mode.VALID_OUTPUT]
@@ -1010,7 +1015,7 @@ def _validators(
paths = list(glob(problem.path / cls.source_dir, "*"))

# TODO: Instead of checking file contents, maybe specify this in generators.yaml?
def has_constraints_checking(f):
def has_constraints_checking(f: Path) -> bool:
if not f.is_file():
return False
try:
@@ -1042,7 +1047,7 @@ def has_constraints_checking(f):
]
bar = ProgressBar(f"Building {cls.validator_type} validator", items=validators)

def build_program(p):
def build_program(p: "Program") -> None:
localbar = bar.start(p)
p.build(localbar)
localbar.done()
@@ -1054,7 +1059,9 @@ def build_program(p):
return validators

# get all testcases and submissions and prepare the output validator and visualizer
def prepare_run(problem):
def prepare_run(
problem,
) -> Literal[False] | tuple[Sequence[testcase.Testcase], Sequence[run.Submission]]:
testcases = problem.testcases()
if not testcases:
return False
@@ -1074,7 +1081,9 @@ def prepare_run(problem):
return testcases, submissions

@staticmethod
def run_some(testcases, submissions):
def run_some(
testcases: Sequence[testcase.Testcase], submissions: Sequence[run.Submission]
) -> tuple[bool, verdicts.VerdictTable]:
max_submission_len = max([len(x.name) for x in submissions])

ok = True
@@ -1093,7 +1102,7 @@ def run_some(testcases, submissions):
return ok, verdict_table

# called by bt run
def run_submissions(problem):
def run_submissions(problem) -> bool:
ts_pair = problem.prepare_run()
if not ts_pair:
return False
@@ -1119,7 +1128,7 @@ def run_submissions(problem):
# Instead of validating the output, this function just prints all output to the
# terminal.
# Note: The CLI only accepts one submission.
def test_submissions(problem):
def test_submissions(problem) -> bool:
submissions = problem.submissions()
if submissions is False:
return False
@@ -1132,16 +1141,18 @@ def test_submissions(problem):
return True

@staticmethod
def _print_table(verdict_table, testcases):
def _print_table(
verdict_table: Sequence[verdicts.Verdicts], testcases: Sequence[testcase.Testcase]
) -> None:
# Begin by aggregating bitstrings for all testcases, and find bitstrings occurring often (>=config.TABLE_THRESHOLD).
def single_verdict(row, testcase):
def single_verdict(row: verdicts.Verdicts, testcase: testcase.Testcase) -> str:
assert row[testcase.name] is not None
if row[testcase.name] is not False:
return verdicts.to_char(row[testcase.name])
else:
return f"{Style.DIM}-{Style.RESET_ALL}"

def make_verdict(tc):
def make_verdict(tc: testcase.Testcase) -> str:
return "".join(map(lambda row: single_verdict(row, tc), verdict_table))

resultant_count, resultant_id = dict[str, int](), dict[str, int]()
@@ -1214,11 +1225,34 @@ def make_verdict(tc):
print(str.format("(Type {})", resultant_id[resultant]), end="", file=sys.stderr)
print(end="\n", file=sys.stderr)

def reset_testcase_hashes(self):
self._testcase_hashes = {}
# called by bt run_testing_tool
def run_testing_tool(problem) -> bool:
testcases = problem.testcases(needans=False, testing_tool=True)
testinputs = [testing_tool.TestInput(problem, t.in_path, t.short_path) for t in testcases]
if not config.args.testcases:
sampleinputs = []
for in_path, _ in problem.download_samples():
sample = testing_tool.TestInput(
problem, in_path, in_path.relative_to(problem.path / "data")
)
if sample not in testinputs:
sampleinputs.append(sample)
testinputs = sampleinputs + testinputs
if not testinputs:
warn(
f"Didn't find any testcases to run the testing tool in problem {problem.name}. Skipping."
)
return False
submissions = problem.selected_or_accepted_submissions()
if not submissions:
return False
return testing_tool.run(problem, testinputs, submissions)

def reset_testcase_hashes(self) -> None:
self._testcase_hashes: dict[str, testcase.Testcase] = {}

# Returns None for new testcases or the Testcase object it equals.
def matches_existing_testcase(self, t):
def matches_existing_testcase(self, t: testcase.Testcase) -> Optional[testcase.Testcase]:
hashes = {}
relevant_files = {
"invalid_input": ["in"],
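
The new run_testing_tool entry point builds TestInput objects and hands them to testing_tool.run. bin/testing_tool.py itself is not shown in this commit, so the following is only a sketch of the interface implied by the call sites above; the field layout and the equality behaviour are assumptions.

from dataclasses import dataclass, field
from pathlib import Path
from typing import Sequence


@dataclass(frozen=True)
class TestInput:
    problem: object = field(compare=False)
    in_path: Path = field(compare=False)
    # Equality is assumed to be based on the path relative to data/, so the
    # "sample not in testinputs" check above can detect a downloaded sample
    # that already exists as a generated testcase.
    short_path: Path


def run(problem: object, testinputs: Sequence[TestInput], submissions: Sequence[object]) -> bool:
    """Run the problem's testing tool on every test input against every submission."""
    raise NotImplementedError("the real logic lives in bin/testing_tool.py")
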
24 changes: 12 additions & 12 deletions bin/program.py
@@ -106,7 +106,7 @@ def sanitizer():
# After build() has been called, the following are available:
# - run_command: command to be executed. E.g. ['/path/to/run'] or ['python3', '/path/to/main.py']. `None` if something failed.
#
# build() will return the (run_command, message) pair.
# build() will return True if building was successful.
class Program:
input_files: list[Path] # Populated in Program.build

@@ -181,26 +181,26 @@ def __init__(

# is file at path executable
@staticmethod
def _is_executable(path):
return path.is_file() and (
path.stat().st_mode & (stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
def _is_executable(path: Path) -> bool:
return bool(
path.is_file() and (path.stat().st_mode & (stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH))
)

# Returns true when file f matches the given shebang regex.
@staticmethod
def _matches_shebang(f, shebang):
def _matches_shebang(f: Path, shebang: Optional[re.Pattern]) -> bool:
if shebang is None:
return True
with f.open() as o:
return shebang.search(o.readline())
return shebang.search(o.readline()) is not None

# Do not warn for the same fallback language multiple times.
warn_cache: set[str] = set()

language: Optional[str]

# Sets self.language and self.env['mainfile']
def _get_language(self, bar: ProgressBar):
def _get_language(self, bar: ProgressBar) -> bool:
fallback = False
candidates = []
for lang in languages():
@@ -300,7 +300,7 @@ def _get_language(self, bar: ProgressBar):
bar.error(f"No language detected for {self.path}.")
return False

def _checks(self, bar: ProgressBar):
def _checks(self, bar: ProgressBar) -> None:
for f in self.source_files:
if f.stat().st_size >= config.ICPC_FILE_LIMIT * 1024**2:
bar.warn(
@@ -367,7 +367,7 @@ def _checks(self, bar: ProgressBar):
pass

# Return True on success.
def _compile(self, bar: ProgressBar):
def _compile(self, bar: ProgressBar) -> bool:
meta_path = self.tmpdir / "meta_.yaml"

# Remove all non-source files.
@@ -415,7 +415,7 @@ def _compile(self, bar: ProgressBar):
return True

# Return True on success, False on failure.
def build(self, bar: ProgressBar):
def build(self, bar: ProgressBar) -> bool:
assert not self.built
self.built = True

@@ -527,7 +527,7 @@ def _exec_command(self, *args, **kwargs) -> ExecResult:
return exec_command(*args, **kwargs)

@staticmethod
def add_callback(problem, path, c):
def add_callback(problem: "Problem", path: Path, c: Callable[["Program"], Any]):
if path not in problem._program_callbacks:
problem._program_callbacks[path] = []
problem._program_callbacks[path].append(c)
@@ -547,7 +547,7 @@ def __init__(self, problem: "Problem", path: Path, **kwargs):
# Run the generator in the given working directory.
# May write files in |cwd| and stdout is piped to {name}.in if it's not written already.
# Returns ExecResult. Success when result.status == ExecStatus.ACCEPTED.
def run(self, bar, cwd, name, args=[]):
def run(self, bar: ProgressBar, cwd: Path, name: str, args: list[str] = []) -> ExecResult:
assert self.run_command is not None

in_path = cwd / (name + ".in")
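
Most of the program.py changes tighten types: _is_executable used to return the raw int produced by the permission bit mask, and _matches_shebang returned the re.Match (or None) from re.search; both now return strict bools. A small standalone illustration of why the wrappers are needed (the mode and shebang values are invented):

import re
import stat

# The permission bit mask yields an int, not a bool, hence the bool(...) wrapper
# in _is_executable.
mode = 0o755  # invented file mode
print(mode & (stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH))        # 73, a truthy int
print(bool(mode & (stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)))  # True

# re.search returns a Match object or None, hence the "is not None" in
# _matches_shebang.
shebang = re.compile(r"python3")  # invented shebang pattern
print(shebang.search("#!/usr/bin/env python3"))              # a re.Match object
print(shebang.search("#!/usr/bin/env python3") is not None)  # True
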
5 changes: 1 addition & 4 deletions bin/run.py
@@ -483,10 +483,7 @@ def process_run(run: Run):
localbar.item_width = padding_len
localbar.done(got_expected, message, data, print_item=False)

p = parallel.new_queue(process_run, pin=True)
for run in runs:
p.put(run)
p.done()
parallel.run_tasks(process_run, runs, pin=True)

self.verdict = verdicts["."]
assert isinstance(self.verdict, Verdict), "Verdict of root must not be empty"
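
The run.py change replaces the manual queue handling with parallel.run_tasks. The real helper lives in bin/parallel.py and is not shown here; an equivalent of the pattern it replaces, written with only the standard library rather than the project's parallel module (and without the pin=True CPU-pinning option), would look roughly like this:

from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Iterable, Optional, TypeVar

T = TypeVar("T")

def run_tasks(f: Callable[[T], None], items: Iterable[T], max_workers: Optional[int] = None) -> None:
    # The old code created a queue, put every run on it, and waited via done();
    # mapping f over all items on a thread pool achieves the same effect.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        list(pool.map(f, items))  # drain the iterator so exceptions surface
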
2 changes: 1 addition & 1 deletion bin/stats.py
@@ -278,7 +278,7 @@ def loc(file: Path) -> Optional[int]:
content = file.read_text()
lexer = lexers.guess_lexer_for_filename(file, content)
assert isinstance(lexer, pygments.lexer.Lexer)
language = lexer.name.lower()
language = getattr(lexer, "name").lower()
tokens = lexer.get_tokens(content)

count = 0
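
The stats.py tweak only changes how the lexer's name attribute is read; loc() still relies on pygments to guess a lexer per file and iterate its token stream. A minimal standalone demonstration of those pygments calls (file name and content are invented):

from pygments import lexers

content = "# a comment\nprint('hello')\n"  # invented example file content
lexer = lexers.guess_lexer_for_filename("example.py", content)
print(getattr(lexer, "name").lower())      # "python", same attribute access as above
for token_type, value in lexer.get_tokens(content):
    print(token_type, repr(value))         # the token stream that loc() iterates over
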