diff --git a/tests/assets/challenges/basic-foo-pyright-config/question.py b/tests/assets/challenges/basic-foo-pyright-config/question.py new file mode 100644 index 0000000..8227e8d --- /dev/null +++ b/tests/assets/challenges/basic-foo-pyright-config/question.py @@ -0,0 +1,19 @@ +"""A simple question, only for running tests. +""" + + +def foo(): + """ + No-op placeholder function used by tests. + + Performs no operation. + """ + pass + + +## End of your code ## +foo(1) +foo(1, 2) # expect-type-error + +## End of test code ## +# pyright: reportGeneralTypeIssues=error \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index de2d65d..81789cf 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,21 +10,46 @@ from flask.testing import FlaskClient from app import app +from views.challenge import ChallengeManager CHALLENGES_DIR = Path(__file__).parent.parent / "challenges" ALL_QUESTIONS = list(CHALLENGES_DIR.glob("**/question.py")) ALL_SOLUTIONS = list(CHALLENGES_DIR.glob("**/solution*.py")) -ALL_HINTS = list(CHALLENGES_DIR.glob("**/hints.md")) @pytest.fixture() def assets_dir() -> Path: - """The directory contains test assets.""" + """ + Path to the test assets directory located alongside this file. + + Returns: + Path: Path to the "assets" directory adjacent to this conftest.py file. + """ return Path(__file__).parent / "assets" +@pytest.fixture() +def mgr(assets_dir: Path): + """ + Create a ChallengeManager for the "challenges" subdirectory of the provided assets directory. + + Parameters: + assets_dir (Path): Path to the test assets directory containing challenge data. + + Returns: + ChallengeManager: Instance initialized with `assets_dir / "challenges"`. + """ + return ChallengeManager(assets_dir / "challenges") + + @pytest.fixture() def test_client() -> FlaskClient: + """ + Create a Flask test client for the application. 
+ + Returns: + test_client (FlaskClient): A test client bound to the application for issuing HTTP requests in tests. + """ return app.test_client() @@ -35,9 +60,4 @@ def question_file(request): @pytest.fixture(params=ALL_SOLUTIONS, ids=lambda x: x.parent.name) def solution_file(request): - return request.param - - -@pytest.fixture(params=ALL_HINTS, ids=lambda x: x.parent.name) -def hint_file(request) -> Path: - return request.param + return request.param \ No newline at end of file diff --git a/tests/test_identical.py b/tests/test_identical.py index e53b59f..27bc372 100644 --- a/tests/test_identical.py +++ b/tests/test_identical.py @@ -8,17 +8,27 @@ def test_identical(solution_file: Path): - level, challenge_name = solution_file.parent.name.split("-", maxsplit=1) - with solution_file.open() as f: - solution_code = f.read() - solution_test = Challenge( - name=challenge_name, level=Level(level), code=solution_code - ).test_code - - question_file = solution_file.parent / "question.py" - with question_file.open() as f: - question_code = f.read() - question_test = Challenge( - name=challenge_name, level=Level(level), code=question_code - ).test_code - assert solution_test.strip() == question_test.strip() + """ + Checks that the test code embedded in the given solution file matches the test code in the corresponding question file. + + Reads the solution file to construct a Challenge object (using the solution file's parent directory name to derive level and challenge name), extracts each challenge's test code up to the marker "\n## End of test code ##\n", strips surrounding whitespace, and asserts the two extracted test code segments are equal. + + Parameters: + solution_file (Path): Path to the solution file whose test code will be compared to the question file located at the same directory with name "question.py". 
+ """ + def get_test_code(path: Path): + TEST_SPLITTER = "\n## End of test code ##\n" + level, challenge_name = path.parent.name.split("-", maxsplit=1) + + with solution_file.open() as f: + challenge_code = f.read() + challenge = Challenge( + name=challenge_name, level=Level(level), code=challenge_code + ) + + return challenge.test_code.partition(TEST_SPLITTER)[0] + + solution_test = get_test_code(solution_file) + question_test = get_test_code(solution_file.parent / "question.py") + + assert solution_test.strip() == question_test.strip() \ No newline at end of file diff --git a/views/challenge.py b/views/challenge.py index ba4a14e..022e79c 100644 --- a/views/challenge.py +++ b/views/challenge.py @@ -11,6 +11,29 @@ from typing import ClassVar, Optional, TypeAlias ROOT_DIR = Path(__file__).parent.parent +PYRIGHT_BASIC_CONFIG = """ +# pyright: analyzeUnannotatedFunctions=true +# pyright: strictParameterNoneValue=true +# pyright: disableBytesTypePromotions=false +# pyright: strictListInference=false +# pyright: strictDictionaryInference=false +# pyright: strictSetInference=false +# pyright: deprecateTypingAliases=false +# pyright: enableExperimentalFeatures=false +# pyright: reportMissingImports=error +# pyright: reportUndefinedVariable=error +# pyright: reportGeneralTypeIssues=error +# pyright: reportOptionalSubscript=error +# pyright: reportOptionalMemberAccess=error +# pyright: reportOptionalCall=error +# pyright: reportOptionalIterable=error +# pyright: reportOptionalContextManager=error +# pyright: reportOptionalOperand=error +# pyright: reportTypedDictNotRequiredAccess=error +# pyright: reportPrivateImportUsage=error +# pyright: reportUnboundVariable=error +# pyright: reportUnusedCoroutine=error +""" class Level(StrEnum): @@ -145,13 +168,50 @@ def _get_challenges_groupby_level(self) -> dict[Level, list[ChallengeName]]: # Pyright error messages look like: # `:: - : ` # Here we only capture the error messages and line numbers - PYRIGHT_MESSAGE_REGEX = 
r"^(?:.+?):(\d+):[\s\-\d]+(error:.+)$" + PYRIGHT_MESSAGE_REGEX = ( + r"^(?:.+?):(?P\d+):[\s\-\d]+(?Perror:.+)$" + ) + + @staticmethod + def _partition_test_code(test_code: str): + """ + Split test code from an optional Pyright configuration block and return the test portion plus the effective Pyright configuration. + + Parameters: + test_code (str): Combined test code that may include a separator line "\n## End of test code ##\n" followed by additional Pyright configuration. + + Returns: + tuple[str, str]: A tuple (test_code, pyright_basic_config) where `test_code` is the portion before the splitter and `pyright_basic_config` is the base PYRIGHT_BASIC_CONFIG optionally extended with any config found after the splitter. + """ + TEST_SPLITTER = "\n## End of test code ##\n" + + # PYRIGHT_BASIC_CONFIG aim to limit user to modify the config + test_code, end_test_comment, pyright_config = test_code.partition(TEST_SPLITTER) + pyright_basic_config = PYRIGHT_BASIC_CONFIG + + # Replace `## End of test code ##` with PYRIGHT_BASIC_CONFIG + if end_test_comment: + pyright_basic_config += pyright_config + return test_code, pyright_basic_config @classmethod def _type_check_with_pyright( cls, user_code: str, test_code: str ) -> TypeCheckResult: - code = f"{user_code}{test_code}" + """ + Run Pyright on combined user and test code (including any embedded Pyright config) and report type-check results. + + Parameters: + user_code (str): The user's source code to be type-checked. + test_code (str): The test suite code (may include an embedded Pyright config region) appended to the user code. + + Returns: + TypeCheckResult: An object containing `message`, a newline-separated report of Pyright errors and a summary line, + and `passed`, which is `true` if no reported errors (other than those originating from Pyright config) remain. 
+ """ + test_code, pyright_basic_config = cls._partition_test_code(test_code) + + code = f"{user_code}{test_code}{pyright_basic_config}" buffer = io.StringIO(code) # This produces a stream of TokenInfos, example: @@ -187,39 +247,47 @@ def _type_check_with_pyright( return TypeCheckResult(message=stderr, passed=False) error_lines: list[str] = [] - # Substract lineno in merged code by lineno_delta, so that the lineno in + # Substract lineno in merged code by user_code_line_len, so that the lineno in # error message matches those in the test code editor. Fixed #20. - lineno_delta = len(user_code.splitlines()) + user_code_lines_len = len(user_code.splitlines()) for line in stdout.splitlines(): m = re.match(cls.PYRIGHT_MESSAGE_REGEX, line) if m is None: continue - line_number, message = int(m.group(1)), m.group(2) + line_number, message = int(m["line_number"]), m["message"] if line_number in error_line_seen_in_err_msg: # Each reported error should be attached to a specific line, # If it is commented with # expect-type-error, let it pass. error_line_seen_in_err_msg[line_number] = True continue # Error could be thrown from user code too, in which case delta shouldn't be applied. - error_lines.append( - f"{line_number if line_number <= lineno_delta else line_number - lineno_delta}:{message}" - ) + error_line = f"%s:{message}" + + if line_number <= user_code_lines_len: + error_lines.append(error_line % line_number) + elif line_number <= user_code_lines_len + len(test_code.splitlines()): + error_lines.append(error_line % (line_number - user_code_lines_len)) + else: + error_lines.append(error_line % "[pyright-config]") # If there are any lines that are expected to fail but not reported by pyright, # they should be considered as errors. 
for line_number, seen in error_line_seen_in_err_msg.items(): if not seen: error_lines.append( - f"{line_number - lineno_delta}: error: Expected type error but instead passed" + f"{line_number - user_code_lines_len}: error: Expected type error but instead passed" ) - passed = len(error_lines) == 0 - if passed: - error_lines.append("\nAll tests passed") - else: - error_lines.append(f"\nFound {len(error_lines)} errors") + # Errors from the pyright config will not fail the challenge + passed = True + for error_line in error_lines: + if error_line.startswith("[pyright-config]"): + continue + passed = False + + error_lines.append(f"\nFound {len(error_lines)} errors") return TypeCheckResult(message="\n".join(error_lines), passed=passed) -challenge_manager = ChallengeManager() +challenge_manager = ChallengeManager() \ No newline at end of file diff --git a/views/views.py b/views/views.py index cf54522..ad43952 100644 --- a/views/views.py +++ b/views/views.py @@ -53,13 +53,21 @@ def index(): @app_views.route("/<level>/<name>", methods=["GET"]) @validate_challenge def get_challenge(level: str, name: str): + """ + Render the challenge page or HTMX component for a given challenge. + + Builds the template context for the challenge identified by level and name, including the user's code under test, the test code truncated at the "\n## End of test code ##\n" marker, rendered hints (when present), challenges grouped by level, and Python runtime information. Returns the HTMX component template when HTMX is active; otherwise returns the full challenge page template. + + Returns: + Flask response: Rendered HTML response for the requested challenge page or HTMX component. 
+ """ challenge = challenge_manager.get_challenge(ChallengeKey(Level(level), name)) params = { "name": name, "level": challenge.level, "challenges_groupby_level": challenge_manager.challenges_groupby_level, "code_under_test": challenge.user_code, - "test_code": challenge.test_code, + "test_code": challenge.test_code.partition("\n## End of test code ##\n")[0], "hints_for_display": render_hints(challenge.hints) if challenge.hints else None, "python_info": platform.python_version(), } @@ -100,4 +108,4 @@ def run_challenge(level: str, name: str): @app_views.route("/random", methods=["GET"]) def run_random_challenge(): challenge = challenge_manager.get_random_challenge() - return redirect(f"/{challenge['level']}/{challenge['name']}") + return redirect(f"/{challenge['level']}/{challenge['name']}") \ No newline at end of file