diff --git a/.github/actions/install-python/action.yml b/.github/actions/install-python/action.yml index 9da90477f107..e8ef1e2c045d 100644 --- a/.github/actions/install-python/action.yml +++ b/.github/actions/install-python/action.yml @@ -14,7 +14,7 @@ runs: using: "composite" steps: - name: Install Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.12.6' diff --git a/.github/workflows/positron-python-ci.yml b/.github/workflows/positron-python-ci.yml index 84a15bf2413a..8e2127f386fa 100644 --- a/.github/workflows/positron-python-ci.yml +++ b/.github/workflows/positron-python-ci.yml @@ -114,7 +114,7 @@ jobs: uv pip install --upgrade -r build/test-requirements.txt -r ./python_files/posit/pinned-test-requirements.txt - name: Run Pyright - uses: jakebailey/pyright-action@b5d50e5cde6547546a5c4ac92e416a8c2c1a1dfe # v2.3.2 + uses: jakebailey/pyright-action@6cabc0f01c4994be48fd45cd9dbacdd6e1ee6e5e # v2.3.3 with: version: 1.1.308 working-directory: ${{ env.PYTHON_SRC_DIR }} @@ -281,7 +281,7 @@ jobs: cache-dependency-path: ${{ env.special-working-directory-relative }}/${{ env.PROJECT_DIR }}/package-lock.json - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} cache: 'pip' diff --git a/.github/workflows/test-e2e-windows.yml b/.github/workflows/test-e2e-windows.yml index 28dcfc62ecfa..ccd502a66633 100644 --- a/.github/workflows/test-e2e-windows.yml +++ b/.github/workflows/test-e2e-windows.yml @@ -118,7 +118,7 @@ jobs: shell: bash - name: Install System Level Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.10.10" @@ -166,7 +166,7 @@ jobs: # Alternate python version - name: Install Python 3.13.0 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.13.0" diff --git a/extensions/positron-python/.github/actions/build-vsix/action.yml b/extensions/positron-python/.github/actions/build-vsix/action.yml index eaabe5141e8b..bfe90fc940e8 100644 --- a/extensions/positron-python/.github/actions/build-vsix/action.yml +++ b/extensions/positron-python/.github/actions/build-vsix/action.yml @@ -32,7 +32,7 @@ runs: # Jedi LS depends on dataclasses which is not in the stdlib in Python 3.7. - name: Use Python 3.9 for JediLSP - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.9 cache: 'pip' diff --git a/extensions/positron-python/.github/actions/lint/action.yml b/extensions/positron-python/.github/actions/lint/action.yml index 9992b442c276..9971c0fbcf96 100644 --- a/extensions/positron-python/.github/actions/lint/action.yml +++ b/extensions/positron-python/.github/actions/lint/action.yml @@ -10,7 +10,7 @@ runs: using: 'composite' steps: - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: node-version: ${{ inputs.node_version }} cache: 'npm' @@ -36,7 +36,7 @@ runs: shell: bash - name: Install Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' cache: 'pip' diff --git a/extensions/positron-python/.github/instructions/learning.instructions.md b/extensions/positron-python/.github/instructions/learning.instructions.md new file mode 100644 index 000000000000..28b085f486ce --- /dev/null +++ b/extensions/positron-python/.github/instructions/learning.instructions.md @@ -0,0 +1,34 @@ +--- +applyTo: '**' +description: This document describes how to deal with learnings that you make. 
(meta instruction)
+---
+
+This document describes how to deal with learnings that you make.
+It is a meta-instruction file.
+
+Structure of learnings:
+
+- Each instruction file has a "Learnings" section.
+- Each learning has a counter that indicates how often that learning was useful (initially 1).
+- Each learning has a one-sentence description of the learning that is clear and concise.
+
+Example:
+
+```markdown
+## Learnings
+
+- Prefer `const` over `let` whenever possible (1)
+- Avoid `any` type (3)
+```
+
+When the user tells you "learn!", you should:
+
+- extract a learning from the recent conversation
+  - identify the problem that you created
+  - identify why it was a problem
+  - identify how you were told to fix it/how the user fixed it
+  - generate only one learning (1 sentence) that summarizes the insight gained
+- then add the reflected learning to the "Learnings" section of the most appropriate instruction file
+
+Important: Whenever a learning was really useful, increase the counter!
+When a learning was not useful and just caused more problems, decrease the counter.
diff --git a/extensions/positron-python/.github/instructions/testing_feature_area.instructions.md b/extensions/positron-python/.github/instructions/testing_feature_area.instructions.md
new file mode 100644
index 000000000000..038dc1025ea5
--- /dev/null
+++ b/extensions/positron-python/.github/instructions/testing_feature_area.instructions.md
@@ -0,0 +1,187 @@
+---
+applyTo: 'src/client/testing/**'
+---
+
+# Testing feature area — Discovery, Run, Debug, and Results
+
+This document maps the testing support in the extension: discovery, execution (run), debugging, result reporting, and how those pieces connect to the codebase. It's written for contributors and agents who need to navigate, modify, or extend test support (both `unittest` and `pytest`).
+
+## Overview
+
+- Purpose: expose Python tests in the VS Code Test Explorer (TestController); support discovery, run, and debug; and surface rich results and outputs.
+- Scope: provider-agnostic orchestration + provider-specific adapters, TestController mapping, IPC with Python-side scripts, debug launch integration, and configuration management.
+
+## High-level architecture
+
+- Controller / UI bridge: orchestrates TestController requests and routes them to workspace adapters.
+- Workspace adapter: provider-agnostic coordinator that translates TestController requests to provider adapters and maps payloads back into TestItems/TestRuns.
+- Provider adapters: implement discovery/run/debug for `unittest` and `pytest` by launching Python scripts and wiring named-pipe IPC.
+- Result resolver: translates Python-side JSON/IPC payloads into TestController updates (start/pass/fail/output/attachments).
+- Debug launcher: prepares debug sessions and coordinates the debugger attach flow with the Python runner.
+
+## Key components (files and responsibilities)
+
+- Entrypoints
+  - `src/client/testing/testController/controller.ts` — `PythonTestController` (main orchestrator).
+  - `src/client/testing/serviceRegistry.ts` — DI/wiring for testing services.
+- Workspace orchestration
+  - `src/client/testing/testController/workspaceTestAdapter.ts` — `WorkspaceTestAdapter` (provider-agnostic entry used by the controller).
+- Provider adapters + - Unittest + - `src/client/testing/testController/unittest/testDiscoveryAdapter.ts` + - `src/client/testing/testController/unittest/testExecutionAdapter.ts` + - Pytest + - `src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts` + - `src/client/testing/testController/pytest/pytestExecutionAdapter.ts` +- Result resolution and helpers + - `src/client/testing/testController/common/resultResolver.ts` — `PythonResultResolver` (maps payload -> TestController updates). + - `src/client/testing/testController/common/testItemUtilities.ts` — helpers for TestItem lifecycle. + - `src/client/testing/testController/common/types.ts` — `ITestDiscoveryAdapter`, `ITestExecutionAdapter`, `ITestResultResolver`, `ITestDebugLauncher`. + - `src/client/testing/testController/common/debugLauncher.ts` — debug session creation helper. + - `src/client/testing/testController/common/utils.ts` — named-pipe helpers and command builders (`startDiscoveryNamedPipe`, etc.). +- Configuration + - `src/client/testing/common/testConfigurationManager.ts` — per-workspace test settings. + - `src/client/testing/configurationFactory.ts` — configuration service factory. +- Utilities & glue + - `src/client/testing/utils.ts` — assorted helpers used by adapters. + - Python-side scripts: `python_files/unittestadapter/*`, `python_files/pytestadapter/*` — discovery/run code executed by adapters. + +## Python subprocess runners (what runs inside Python) + +The adapters in the extension don't implement test discovery/run logic themselves — they spawn a Python subprocess that runs small helper scripts located under `python_files/` and stream structured events back to the extension over the named-pipe IPC. This is a central part of the feature area; changes here usually require coordinated edits in both the TypeScript adapters and the Python scripts. + +- Unittest helpers (folder: `python_files/unittestadapter`) + + - `discovery.py` — performs `unittest` discovery and emits discovery payloads (test suites, cases, locations) on the IPC channel. + - `execution.py` / `django_test_runner.py` — run tests for `unittest` and, where applicable, Django test runners; emit run events (start, stdout/stderr, pass, fail, skip, teardown) and attachment info. + - `pvsc_utils.py`, `django_handler.py` — utility helpers used by the runners for environment handling and Django-specific wiring. + - The adapter TypeScript files (`testDiscoveryAdapter.ts`, `testExecutionAdapter.ts`) construct the command line, start a named-pipe listener, and spawn these Python scripts using the extension's ExecutionFactory (activated interpreter) so the scripts execute inside the user's selected environment. + +- Pytest helpers (folder: `python_files/vscode_pytest`) + + - `_common.py` — shared helpers for pytest runner scripts. + - `run_pytest_script.py` — the primary pytest runner used for discovery and execution; emits the same structured IPC payloads the extension expects (discovery events and run events). + - The `pytest` execution adapter (`pytestExecutionAdapter.ts`) and discovery adapter build the CLI to run `run_pytest_script.py`, start the pipe, and translate incoming payloads via `PythonResultResolver`. + +- IPC contract and expectations + + - Adapters rely on a stable JSON payload contract emitted by the Python scripts: identifiers for tests, event types (discovered, collected, started, passed, failed, skipped), timings, error traces, and optional attachments (logs, captured stdout/stderr, file links). 
+ - The extension maps these payloads to `TestItem`/`TestRun` updates via `PythonResultResolver` (`src/client/testing/testController/common/resultResolver.ts`). If you change payload shape, update the resolver and tests concurrently. + +- How the subprocess is started + - Execution adapters use the extension's `ExecutionFactory` (preferred) to get an activated interpreter and then spawn a child process that runs the helper script. The adapter will set up environment variables and command-line args (including the pipe name / run-id) so the Python runner knows where to send events and how to behave (discovery vs run vs debug). + - For debug sessions a debug-specific entry argument/port is passed and `common/debugLauncher.ts` coordinates starting a VS Code debug session that will attach to the Python process. + +## Core functionality (what to change where) + +- Discovery + - Entry: `WorkspaceTestAdapter.discoverTests` → provider discovery adapter. Adapter starts a named-pipe listener, spawns the discovery script in an activated interpreter, forwards discovery events to `PythonResultResolver` which creates/updates TestItems. + - Files: `workspaceTestAdapter.ts`, `*DiscoveryAdapter.ts`, `resultResolver.ts`, `testItemUtilities.ts`. +- Run / Execution + - Entry: `WorkspaceTestAdapter.executeTests` → provider execution adapter. Adapter spawns runner in an activated env, runner streams run events to the pipe, `PythonResultResolver` updates a `TestRun` with start/pass/fail and attachments. + - Files: `workspaceTestAdapter.ts`, `*ExecutionAdapter.ts`, `resultResolver.ts`. +- Debugging + - Flow: debug request flows like a run but goes through `debugLauncher.ts` to create a VS Code debug session with prepared ports/pipes. The Python runner coordinates attach/continue with the debugger. + - Files: `*ExecutionAdapter.ts`, `common/debugLauncher.ts`, `common/types.ts`. +- Result reporting + - `resultResolver.ts` is the canonical place to change how JSON payloads map to TestController constructs (messages, durations, error traces, attachments). + +## Typical workflows (short) + +- Full discovery + + 1. `PythonTestController` triggers discovery -> `WorkspaceTestAdapter.discoverTests`. + 2. Provider discovery adapter starts pipe and launches Python discovery script. + 3. Discovery events -> `PythonResultResolver` -> TestController tree updated. + +- Run tests + + 1. Controller collects TestItems -> creates `TestRun`. + 2. `WorkspaceTestAdapter.executeTests` delegates to execution adapter which launches the runner. + 3. Runner events arrive via pipe -> `PythonResultResolver` updates `TestRun`. + 4. On process exit the run is finalized. + +- Debug a test + 1. Debug request flows to execution adapter. + 2. Adapter prepares ports and calls `debugLauncher` to start a VS Code debug session with the run ID. + 3. Runner coordinates with the debugger; `PythonResultResolver` still receives and applies run events. + +## Tests and examples to inspect + +- Unit/integration tests for adapters and orchestration under `src/test/` (examples): + - `src/test/testing/common/testingAdapter.test.ts` + - `src/test/testing/testController/workspaceTestAdapter.unit.test.ts` + - `src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts` + - Adapter tests demonstrate expected telemetry, debug-launch payloads and result resolution. + +## History & evolution (brief) + +- Migration to TestController API: the code organizes around VS Code TestController, mapping legacy adapter behaviour into TestItems/TestRuns. 
+- Named-pipe IPC: discovery/run use named-pipe IPC to stream events from Python runner scripts (`python_files/*`), which enables richer, incremental updates and debug coordination.
+- Environment activation: adapters prefer the extension ExecutionFactory (activated interpreter) to run discovery and test scripts.
+
+## Pointers for contributors (practical)
+
+- To extend discovery output: update the Python discovery script in `python_files/*` and `resultResolver.ts` to parse new payload fields.
+- To change run behaviour (args/env/timeouts): update the provider execution adapter (`*ExecutionAdapter.ts`) and add/update tests under `src/test/`.
+- To change debug flow: edit `common/debugLauncher.ts` and the adapters' debug paths; update tests that assert launch argument shapes.
+
+## Django support (how it works)
+
+- The extension supports Django projects by delegating discovery and execution to Django-aware Python helpers under `python_files/unittestadapter`.
+  - `python_files/unittestadapter/django_handler.py` contains helpers that invoke `manage.py` for discovery or execute Django test runners inside the project context.
+  - `python_files/unittestadapter/django_test_runner.py` provides `CustomDiscoveryTestRunner` and `CustomExecutionTestRunner`, which integrate with the extension by using the same IPC contract (they use `UnittestTestResult` and `send_post_request` to emit discovery/run payloads).
+- How adapters pass Django configuration:
+  - Execution adapters set environment variables (e.g. `MANAGE_PY_PATH`) and modify `PYTHONPATH` so Django code and the custom test runner are importable inside the spawned subprocess.
+  - For discovery, the adapter may run the discovery helper, which calls `manage.py test` with a custom test runner that emits discovery payloads instead of executing tests.
+- Practical notes for contributors:
+  - Changes to Django discovery/execution often require edits in both `django_test_runner.py`/`django_handler.py` and the TypeScript adapters (`testDiscoveryAdapter.ts` / `testExecutionAdapter.ts`).
+  - The Django test runner expects the `TEST_RUN_PIPE` environment variable to be present in order to send IPC events (see `django_test_runner.py`).
+
+## Settings referenced by this feature area
+
+- The extension exposes several `python.testing.*` settings used by adapters and configuration code (declared in `package.json`):
+  - `python.testing.pytestEnabled`, `python.testing.unittestEnabled` — enable/disable frameworks.
+  - `python.testing.pytestPath`, `python.testing.pytestArgs`, `python.testing.unittestArgs` — command path and CLI arguments used when spawning helper scripts.
+  - `python.testing.cwd` — optional working directory used for discovery and runs.
+  - `python.testing.autoTestDiscoverOnSaveEnabled`, `python.testing.autoTestDiscoverOnSavePattern` — control automatic discovery on save.
+  - `python.testing.debugPort` — default port used for debug runs.
+  - `python.testing.promptToConfigure` — whether to prompt users to configure tests when potential test folders are found.
+- Where to look in the code:
+  - Settings are consumed by `src/client/testing/common/testConfigurationManager.ts`, `src/client/testing/configurationFactory.ts`, and the adapters under `src/client/testing/testController/*`, which read settings to build CLI args and env for subprocesses.
+  - The setting definitions and descriptions are in `package.json`, with localized strings in `package.nls.json`.
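+
+A minimal sketch of how an adapter can turn these settings into a spawn command (illustrative only: the real adapters read settings through the extension's configuration service rather than calling `workspace.getConfiguration` directly, and the exact discovery flags are wired elsewhere):
+
+```typescript
+import { Uri, workspace } from 'vscode';
+
+// Resolve the user's pytest settings for one workspace folder and build a
+// discovery-style command line. The defaults here are assumptions for the sketch,
+// and '--collect-only' stands in for the real discovery wiring.
+function buildPytestCommand(folder: Uri): { cmd: string; args: string[]; cwd?: string } {
+    const cfg = workspace.getConfiguration('python.testing', folder);
+    const cmd = cfg.get<string>('pytestPath', 'pytest');
+    const userArgs = cfg.get<string[]>('pytestArgs', []);
+    const cwd = cfg.get<string>('cwd') || undefined; // optional working directory
+    return { cmd, args: ['--collect-only', ...userArgs], cwd };
+}
+```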
+ +## Coverage support (how it works) + +- Coverage is supported by running the Python helper scripts with coverage enabled and then collecting a coverage payload from the runner. + - Pytest-side coverage logic lives in `python_files/vscode_pytest/__init__.py` (checks `COVERAGE_ENABLED`, imports `coverage`, computes per-file metrics and emits a `CoveragePayloadDict`). + - Unittest adapters enable coverage by setting environment variable(s) (e.g. `COVERAGE_ENABLED`) when launching the subprocess; adapters and `resultResolver.ts` handle the coverage profile kind (`TestRunProfileKind.Coverage`). +- Flow summary: + 1. User starts a Coverage run via Test Explorer (profile kind `Coverage`). + 2. Controller/adapters set `COVERAGE_ENABLED` (or equivalent) in the subprocess env and invoke the runner script. + 3. The Python runner collects coverage (using `coverage` or `pytest-cov`), builds a file-level coverage map, and sends a coverage payload back over the IPC. + 4. `PythonResultResolver` (`src/client/testing/testController/common/resultResolver.ts`) receives the coverage payload and stores `detailedCoverageMap` used by the TestController profile to show file-level coverage details. +- Tests that exercise coverage flows are under `src/test/testing/*` and `python_files/tests/*` (see `testingAdapter.test.ts` and adapter unit tests that assert `COVERAGE_ENABLED` is set appropriately). + +## Interaction with the VS Code API + +- TestController API + - The feature area is built on VS Code's TestController/TestItem/TestRun APIs (`vscode.tests.createTestController` / `tests.createTestController` in the code). The controller creates a `TestController` in `src/client/testing/testController/controller.ts` and synchronizes `TestItem` trees with discovery payloads. + - `PythonResultResolver` maps incoming JSON events to VS Code API calls: `testRun.appendOutput`, `testRun.passed/failed/skipped`, `testRun.end`, and `TestItem` updates (labels, locations, children). +- Debug API + - Debug runs use the Debug API to start an attach/launch session. The debug launcher implementation is in `src/client/testing/testController/common/debugLauncher.ts` which constructs a debug configuration and calls the VS Code debug API to start a session (e.g. `vscode.debug.startDebugging`). + - Debug adapter/resolver code in the extension's debugger modules may also be used when attaching to Django or test subprocesses. +- Commands and configuration + - The Test Controller wires commands that appear in the Test Explorer and editor context menus (see `package.json` contributes `commands`) and listens to configuration changes filtered by `python.testing` in `src/client/testing/main.ts`. +- The "Copy Test ID" command (`python.copyTestId`) can be accessed from both the Test Explorer context menu (`testing/item/context`) and the editor gutter icon context menu (`testing/item/gutter`). This command copies test identifiers to the clipboard in the appropriate format for the active test framework (pytest path format or unittest module.class.method format). +- Execution factory & activated environments + - Adapters use the extension `ExecutionFactory` to spawn subprocesses in an activated interpreter (so the user's venv/conda is used). This involves the extension's internal environment execution APIs and sometimes `envExt` helpers when the external environment extension is present. 
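+
+To make the resolver's role concrete, here is a compressed sketch of the payload-to-API mapping (the `RunEvent` shape below is a simplified stand-in; the real payload types live in `src/client/testing/testController/common/types.ts`):
+
+```typescript
+import { TestItem, TestMessage, TestRun } from 'vscode';
+
+// Simplified run event; the real contract carries more fields (subtests, timings, etc.).
+interface RunEvent {
+    test: string; // run id of the test
+    outcome: 'success' | 'failure' | 'skipped';
+    message?: string;
+    traceback?: string;
+}
+
+// Core of what PythonResultResolver does per event: run id -> cached TestItem -> TestRun update.
+function applyRunEvent(event: RunEvent, runIdToTestItem: Map<string, TestItem>, run: TestRun): void {
+    const item = runIdToTestItem.get(event.test);
+    if (!item) {
+        return; // unknown id: the real resolver falls back to a tree search
+    }
+    if (event.outcome === 'failure') {
+        const text = `${event.test} failed: ${event.message ?? ''}\r\n${event.traceback ?? ''}`;
+        run.failed(item, new TestMessage(text));
+    } else if (event.outcome === 'skipped') {
+        run.skipped(item);
+    } else {
+        run.passed(item);
+    }
+}
+```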
+ +## Learnings + +- Never await `showErrorMessage()` calls in test execution adapters as it blocks the test UI thread and freezes the Test Explorer (1) +- VS Code test-related context menus are contributed to using both `testing/item/context` and `testing/item/gutter` menu locations in package.json for full coverage (1) + +``` + +``` diff --git a/extensions/positron-python/.github/prompts/extract-impl-instructions.prompt.md b/extensions/positron-python/.github/prompts/extract-impl-instructions.prompt.md new file mode 100644 index 000000000000..c2fb08b443c7 --- /dev/null +++ b/extensions/positron-python/.github/prompts/extract-impl-instructions.prompt.md @@ -0,0 +1,79 @@ +--- +mode: edit +--- + +Analyze the specified part of the VS Code Python Extension codebase to generate or update implementation instructions in `.github/instructions/.instructions.md`. + +## Task + +Create concise developer guidance focused on: + +### Implementation Essentials + +- **Core patterns**: How this component is typically implemented and extended +- **Key interfaces**: Essential classes, services, and APIs with usage examples +- **Integration points**: How this component interacts with other extension parts +- **Common tasks**: Typical development scenarios with step-by-step guidance + +### Content Structure + +````markdown +--- +description: 'Implementation guide for the part of the Python Extension' +--- + +# Implementation Guide + +## Overview + +Brief description of the component's purpose and role in VS Code Python Extension. + +## Key Concepts + +- Main abstractions and their responsibilities +- Important interfaces and base classes + +## Common Implementation Patterns + +### Pattern 1: [Specific Use Case] + +```typescript +// Code example showing typical implementation +``` +```` + +### Pattern 2: [Another Use Case] + +```typescript +// Another practical example +``` + +## Integration Points + +- How this component connects to other VS Code Python Extension systems +- Required services and dependencies +- Extension points and contribution models + +## Essential APIs + +- Key methods and interfaces developers need +- Common parameters and return types + +## Gotchas and Best Practices + +- Non-obvious behaviors to watch for +- Performance considerations +- Common mistakes to avoid + +``` + +## Guidelines +- **Be specific**: Use actual class names, method signatures, and file paths +- **Show examples**: Include working code snippets from the codebase +- **Target implementation**: Focus on how to build with/extend this component +- **Keep it actionable**: Every section should help developers accomplish tasks + +Source conventions from existing `.github/instructions/*.instructions.md`, `CONTRIBUTING.md`, and codebase patterns. + +If `.github/instructions/.instructions.md` exists, intelligently merge new insights with existing content. +``` diff --git a/extensions/positron-python/.github/prompts/extract-usage-instructions.prompt.md b/extensions/positron-python/.github/prompts/extract-usage-instructions.prompt.md new file mode 100644 index 000000000000..ea48f162a220 --- /dev/null +++ b/extensions/positron-python/.github/prompts/extract-usage-instructions.prompt.md @@ -0,0 +1,30 @@ +--- +mode: edit +--- + +Analyze the user requested part of the codebase (use a suitable ) to generate or update `.github/instructions/.instructions.md` for guiding developers and AI coding agents. 
+ +Focus on practical usage patterns and essential knowledge: + +- How to use, extend, or integrate with this code area +- Key architectural patterns and conventions specific to this area +- Common implementation patterns with code examples +- Integration points and typical interaction patterns with other components +- Essential gotchas and non-obvious behaviors + +Source existing conventions from `.github/instructions/*.instructions.md`, `CONTRIBUTING.md`, and `README.md`. + +Guidelines: + +- Write concise, actionable instructions using markdown structure +- Document discoverable patterns with concrete examples +- If `.github/instructions/.instructions.md` exists, merge intelligently +- Target developers who need to work with or extend this code area + +Update `.github/instructions/.instructions.md` with header: + +``` +--- +description: "How to work with the part of the codebase" +--- +``` diff --git a/extensions/positron-python/.github/workflows/build.yml b/extensions/positron-python/.github/workflows/build.yml index 6d122b77288b..88b88ebc2876 100644 --- a/extensions/positron-python/.github/workflows/build.yml +++ b/extensions/positron-python/.github/workflows/build.yml @@ -130,7 +130,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} @@ -155,7 +155,7 @@ jobs: python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright - uses: jakebailey/pyright-action@b5d50e5cde6547546a5c4ac92e416a8c2c1a1dfe # v2.3.2 + uses: jakebailey/pyright-action@6cabc0f01c4994be48fd45cd9dbacdd6e1ee6e5e # v2.3.3 with: version: 1.1.308 working-directory: 'python_files' @@ -184,7 +184,7 @@ jobs: persist-credentials: false - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -236,7 +236,7 @@ jobs: sparse-checkout-cone-mode: false - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -252,7 +252,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Install Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} diff --git a/extensions/positron-python/.github/workflows/gen-issue-velocity.yml b/extensions/positron-python/.github/workflows/gen-issue-velocity.yml index c28c6c368562..fdcb41cdaba9 100644 --- a/extensions/positron-python/.github/workflows/gen-issue-velocity.yml +++ b/extensions/positron-python/.github/workflows/gen-issue-velocity.yml @@ -19,7 +19,7 @@ jobs: persist-credentials: false - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/extensions/positron-python/.github/workflows/pr-check.yml b/extensions/positron-python/.github/workflows/pr-check.yml index b410c37215b9..c932c314682d 100644 --- a/extensions/positron-python/.github/workflows/pr-check.yml +++ b/extensions/positron-python/.github/workflows/pr-check.yml @@ -101,7 +101,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} @@ -138,7 +138,7 @@ jobs: python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright - uses: 
jakebailey/pyright-action@b5d50e5cde6547546a5c4ac92e416a8c2c1a1dfe # v2.3.2 + uses: jakebailey/pyright-action@6cabc0f01c4994be48fd45cd9dbacdd6e1ee6e5e # v2.3.3 with: version: 1.1.308 working-directory: 'python_files' @@ -168,7 +168,7 @@ jobs: persist-credentials: false - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -249,7 +249,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -505,7 +505,7 @@ jobs: sparse-checkout-cone-mode: false - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -520,7 +520,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: 'pip' diff --git a/extensions/positron-python/.github/workflows/pr-file-check.yml b/extensions/positron-python/.github/workflows/pr-file-check.yml index 688c48d865d8..da5d6fa8f696 100644 --- a/extensions/positron-python/.github/workflows/pr-file-check.yml +++ b/extensions/positron-python/.github/workflows/pr-file-check.yml @@ -44,7 +44,7 @@ jobs: failure-message: 'TypeScript code was edited without also editing a ${file-pattern} file; see the Testing page in our wiki on testing guidelines (the ${skip-label} label can be used to pass this check)' - name: 'Ensure PR has an associated issue' - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | const labels = context.payload.pull_request.labels.map(label => label.name); diff --git a/extensions/positron-python/package.json b/extensions/positron-python/package.json index 466f13d9d369..6a252a398922 100644 --- a/extensions/positron-python/package.json +++ b/extensions/positron-python/package.json @@ -1490,6 +1490,13 @@ "when": "controllerId == 'python-tests'" } ], + "testing/item/gutter": [ + { + "command": "python.copyTestId", + "group": "navigation", + "when": "controllerId == 'python-tests'" + } + ], "commandPalette": [ { "category": "Python", diff --git a/extensions/positron-python/python_files/ipykernel_requirements/cp3-requirements.txt b/extensions/positron-python/python_files/ipykernel_requirements/cp3-requirements.txt index b85af4128ce2..b54894c2bacd 100644 --- a/extensions/positron-python/python_files/ipykernel_requirements/cp3-requirements.txt +++ b/extensions/positron-python/python_files/ipykernel_requirements/cp3-requirements.txt @@ -6,15 +6,15 @@ # --only-binary :all: -psutil==7.1.0 \ - --hash=sha256:09ad740870c8d219ed8daae0ad3b726d3bf9a028a198e7f3080f6a1888b99bca \ - --hash=sha256:22e4454970b32472ce7deaa45d045b34d3648ce478e26a04c7e858a0a6e75ff3 \ - --hash=sha256:57f5e987c36d3146c0dd2528cd42151cf96cd359b9d67cfff836995cc5df9a3d \ - --hash=sha256:5d007560c8c372efdff9e4579c2846d71de737e4605f611437255e81efcca2c5 \ - --hash=sha256:6937cb68133e7c97b6cc9649a570c9a18ba0efebed46d8c5dae4c07fa1b67a07 \ - --hash=sha256:76168cef4397494250e9f4e73eb3752b146de1dd950040b29186d0cce1d5ca13 \ - --hash=sha256:7d4a113425c037300de3ac8b331637293da9be9713855c4fc9d2d97436d7259d \ - --hash=sha256:8c70e113920d51e89f212dd7be06219a9b88014e63a4cec69b684c327bc474e3 +psutil==7.1.1 \ + --hash=sha256:146a704f224fb2ded2be3da5ac67fc32b9ea90c45b51676f9114a6ac45616967 \ + 
--hash=sha256:295c4025b5cd880f7445e4379e6826f7307e3d488947bf9834e865e7847dc5f7 \ + --hash=sha256:2a95104eae85d088891716db676f780c1404fc15d47fde48a46a5d61e8f5ad2c \ + --hash=sha256:5457cf741ca13da54624126cd5d333871b454ab133999a9a103fb097a7d7d21a \ + --hash=sha256:8fa59d7b1f01f0337f12cd10dbd76e4312a4d3c730a4fedcbdd4e5447a8b8460 \ + --hash=sha256:92ebc58030fb054fa0f26c3206ef01c31c29d67aee1367e3483c16665c25c8d2 \ + --hash=sha256:98629cd8567acefcc45afe2f4ba1e9290f579eacf490a917967decce4b74ee9b \ + --hash=sha256:9b4f17c5f65e44f69bd3a3406071a47b79df45cf2236d1f717970afcb526bcd3 # via ipykernel tornado==6.5.2 \ --hash=sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c \ diff --git a/extensions/positron-python/python_files/ipykernel_requirements/py3-requirements.txt b/extensions/positron-python/python_files/ipykernel_requirements/py3-requirements.txt index aba26d5a1920..b2a8675dd8a6 100644 --- a/extensions/positron-python/python_files/ipykernel_requirements/py3-requirements.txt +++ b/extensions/positron-python/python_files/ipykernel_requirements/py3-requirements.txt @@ -58,8 +58,8 @@ executing==2.2.1 \ importlib-metadata==8.7.0 \ --hash=sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd # via jupyter-client -ipykernel==6.30.1 \ - --hash=sha256:aa6b9fb93dca949069d8b85b6c79b2518e32ac583ae9c7d37c51d119e18b3fb4 +ipykernel==6.31.0 \ + --hash=sha256:abe5386f6ced727a70e0eb0cf1da801fa7c5fa6ff82147747d5a0406cd8c94af # via python scripts/pip-compile-ipykernel.py ipython==8.18.1 \ --hash=sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397 diff --git a/extensions/positron-python/python_files/posit/pinned-test-requirements.txt b/extensions/positron-python/python_files/posit/pinned-test-requirements.txt index a314516791be..3255c7e97a63 100644 --- a/extensions/positron-python/python_files/posit/pinned-test-requirements.txt +++ b/extensions/positron-python/python_files/posit/pinned-test-requirements.txt @@ -10,45 +10,48 @@ # Then iterate through supported PYTHON_VERSIONS to make sure we covered the latest versions. 
-duckdb==1.3.2 +duckdb==1.4.1 fastcore==1.8.9; python_version == '3.9' -fastcore==1.8.11; python_version >= '3.10' +fastcore==1.8.13; python_version >= '3.10' geopandas==1.0.1; python_version == '3.9' geopandas==1.1.1; python_version >= '3.10' and python_version < '3.14' -haystack-ai==2.18.0; python_version < '3.14' +haystack-ai==2.19.0 holoviews==1.20.2; python_version == '3.9' holoviews==1.21.0; python_version >= '3.10' hvplot==0.11.3; python_version == '3.9' hvplot==0.12.1; python_version >= '3.10' -ibis-framework[duckdb]==10.8.0; python_version < '3.14' -ipykernel==6.30.1 +ibis-framework[duckdb]==11.0.0; python_version < '3.14' +ipykernel==6.31.0; python_version == '3.9' +ipykernel==7.0.1; python_version >= '3.10' ipython==8.18.1; python_version == '3.9' ipython==8.37.0; python_version == '3.10' -ipython==9.5.0; python_version >= '3.11' +ipython==9.6.0; python_version >= '3.11' ipywidgets==8.1.7 -lightning==2.5.5; python_version < '3.14' +lightning==2.5.5 matplotlib==3.9.4; python_version == '3.9' -matplotlib==3.10.6; python_version >= '3.10' +matplotlib==3.10.7; python_version >= '3.10' numpy==2.0.2; python_version == '3.9' numpy==2.2.6; python_version == '3.10' -numpy==2.3.3; python_version >= '3.11' -pandas==2.3.2 -plotly==6.3.0 +numpy==2.3.4; python_version >= '3.11' +pandas==2.3.3 +plotly==6.3.1 plotnine==0.13.6; python_version == '3.9' plotnine==0.15.0; python_version >= '3.10' -polars==1.33.1 -polars[timezone]==1.33.1; sys_platform == 'win32' +polars==1.34.0 +polars[timezone]==1.34.0; sys_platform == 'win32' pyarrow==21.0.0; python_version < '3.14' pytest==8.4.2 pytest-asyncio==1.2.0 pytest-mock==3.15.1 -syrupy==4.9.1 -torch==2.8.0; python_version < '3.14' +syrupy==4.9.1; python_version == '3.9' +syrupy==5.0.0; python_version >= '3.10' +torch==2.8.0; python_version == '3.9' +torch==2.9.0; python_version >= '3.10' scipy==1.13.1; python_version == '3.9' scipy==1.15.3; python_version == '3.10' scipy==1.16.2; python_version >= '3.11' -snowflake-connector-python==3.17.4; python_version < '3.14' -SQLAlchemy==2.0.43 +snowflake-connector-python==4.0.0; python_version < '3.14' +SQLAlchemy==2.0.44 # putting this last like test-requirements.txt bokeh==3.4.3; python_version == '3.9' diff --git a/extensions/positron-python/python_files/posit/test-requirements.txt b/extensions/positron-python/python_files/posit/test-requirements.txt index e7f47462c3a9..f089bc176cbf 100644 --- a/extensions/positron-python/python_files/posit/test-requirements.txt +++ b/extensions/positron-python/python_files/posit/test-requirements.txt @@ -1,7 +1,7 @@ -duckdb<1.4.0 +duckdb fastcore geopandas; python_version < '3.14' -haystack-ai; python_version < '3.14' +haystack-ai holoviews hvplot ibis-framework[polars]; python_version == '3.9' @@ -9,7 +9,7 @@ ibis-framework[duckdb]; python_version >= '3.10' and python_version < '3.14' ipykernel ipython ipywidgets -lightning; python_version < '3.14' +lightning matplotlib numpy pandas @@ -22,7 +22,7 @@ pytest pytest-asyncio pytest-mock syrupy -torch; python_version < '3.14' +torch scipy snowflake-connector-python; python_version < '3.14' sqlalchemy diff --git a/extensions/positron-python/python_files/positron_requirements/requirements.in b/extensions/positron-python/python_files/positron_requirements/requirements.in index 1187d017b50c..d699cbaabbfc 100644 --- a/extensions/positron-python/python_files/positron_requirements/requirements.in +++ b/extensions/positron-python/python_files/positron_requirements/requirements.in @@ -2,7 +2,7 @@ # To update requirements.txt, run the 
following commands. # Use Python 3.9 when creating the environment or using pip-tools # 1) Install `uv` https://docs.astral.sh/uv/getting-started/installation/ -# 2) uv pip compile --generate-hashes --upgrade python_files/positron_requirements/requirements.in > python_files/positron_requirements/requirements.txt +# 2) uv pip compile --python-version 3.9 --generate-hashes --upgrade python_files/positron_requirements/requirements.in > python_files/positron_requirements/requirements.txt docstring-to-markdown==0.13 jedi-language-server>=0.44.0 diff --git a/extensions/positron-python/python_files/positron_requirements/requirements.txt b/extensions/positron-python/python_files/positron_requirements/requirements.txt index 464624b1ad8a..15c4f8d179f3 100644 --- a/extensions/positron-python/python_files/positron_requirements/requirements.txt +++ b/extensions/positron-python/python_files/positron_requirements/requirements.txt @@ -1,8 +1,8 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --generate-hashes python_files/positron_requirements/requirements.in -attrs==25.3.0 \ - --hash=sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3 \ - --hash=sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b +# uv pip compile --python-version 3.9 --generate-hashes python_files/positron_requirements/requirements.in +attrs==25.4.0 \ + --hash=sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11 \ + --hash=sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373 # via # cattrs # lsprotocol diff --git a/extensions/positron-python/python_files/python_server.py b/extensions/positron-python/python_files/python_server.py index 1689d9b8f7f9..77b43c692dc3 100644 --- a/extensions/positron-python/python_files/python_server.py +++ b/extensions/positron-python/python_files/python_server.py @@ -5,6 +5,7 @@ import sys import traceback import uuid +from pathlib import Path from typing import Dict, List, Optional, Union STDIN = sys.stdin @@ -172,6 +173,16 @@ def get_headers(): if __name__ == "__main__": + # https://docs.python.org/3/tutorial/modules.html#the-module-search-path + # The directory containing the input script (or the current directory when no file is specified). + # Here we emulate the same behavior like no file is specified. 
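+    # In other words: make sys.path look as it does when Python starts with no
+    # script argument, with "" (the current working directory) searched first and
+    # this helper's own directory removed.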
+ input_script_dir = Path(__file__).parent + script_dir_str = str(input_script_dir) + if script_dir_str in sys.path: + sys.path.remove(script_dir_str) + while "" in sys.path: + sys.path.remove("") + sys.path.insert(0, "") while not STDIN.closed: try: headers = get_headers() diff --git a/extensions/positron-python/requirements.in b/extensions/positron-python/requirements.in index ba2339b1e966..8bbc9a0f3728 100644 --- a/extensions/positron-python/requirements.in +++ b/extensions/positron-python/requirements.in @@ -4,7 +4,7 @@ # 2) uv pip compile --generate-hashes --upgrade requirements.in -o requirements.txt # Unittest test adapter -typing-extensions==4.14.1 +typing-extensions==4.15.0 # Fallback env creator for debian microvenv diff --git a/extensions/positron-python/requirements.txt b/extensions/positron-python/requirements.txt index 2e6b6ee07783..dddc2ee9691c 100644 --- a/extensions/positron-python/requirements.txt +++ b/extensions/positron-python/requirements.txt @@ -46,9 +46,9 @@ tomli==2.2.1 \ --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 # via -r requirements.in -typing-extensions==4.14.1 \ - --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ - --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 # via -r requirements.in zipp==3.21.0 \ --hash=sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4 \ diff --git a/extensions/positron-python/src/client/testing/testController/common/resultResolver.ts b/extensions/positron-python/src/client/testing/testController/common/resultResolver.ts index 82856627e0c9..b92e7a870f20 100644 --- a/extensions/positron-python/src/client/testing/testController/common/resultResolver.ts +++ b/extensions/positron-python/src/client/testing/testController/common/resultResolver.ts @@ -99,6 +99,11 @@ export class PythonResultResolver implements ITestResultResolver { // if any tests exist, they should be populated in the test tree, regardless of whether there were errors or not. // parse and insert test data. + // Clear existing mappings before rebuilding test tree + this.runIdToTestItem.clear(); + this.runIdToVSid.clear(); + this.vsIdToRunId.clear(); + // If the test root for this folder exists: Workspace refresh, update its children. // Otherwise, it is a freshly discovered workspace, and we need to create a new test root and populate the test tree. populateTestTree(this.testController, rawTestData.tests, undefined, this, token); @@ -173,165 +178,291 @@ export class PythonResultResolver implements ITestResultResolver { } } + /** + * Collect all test case items from the test controller tree. + * Note: This performs full tree traversal - use cached lookups when possible. + */ + private collectAllTestCases(): TestItem[] { + const testCases: TestItem[] = []; + + this.testController.items.forEach((i) => { + const tempArr: TestItem[] = getTestCaseNodes(i); + testCases.push(...tempArr); + }); + + return testCases; + } + + /** + * Find a test item efficiently using cached maps with fallback strategies. + * Uses a three-tier approach: direct lookup, ID mapping, then tree search. 
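+     * Stale cache entries found along the way are removed so later lookups stay fast.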
+ */ + private findTestItemByIdEfficient(keyTemp: string): TestItem | undefined { + // Try direct O(1) lookup first + const directItem = this.runIdToTestItem.get(keyTemp); + if (directItem) { + // Validate the item is still in the test tree + if (this.isTestItemValid(directItem)) { + return directItem; + } else { + // Clean up stale reference + this.runIdToTestItem.delete(keyTemp); + } + } + + // Try vsId mapping as fallback + const vsId = this.runIdToVSid.get(keyTemp); + if (vsId) { + // Search by VS Code ID in the controller + let foundItem: TestItem | undefined; + this.testController.items.forEach((item) => { + if (item.id === vsId) { + foundItem = item; + return; + } + if (!foundItem) { + item.children.forEach((child) => { + if (child.id === vsId) { + foundItem = child; + } + }); + } + }); + + if (foundItem) { + // Cache for future lookups + this.runIdToTestItem.set(keyTemp, foundItem); + return foundItem; + } else { + // Clean up stale mapping + this.runIdToVSid.delete(keyTemp); + this.vsIdToRunId.delete(vsId); + } + } + + // Last resort: full tree search + traceError(`Falling back to tree search for test: ${keyTemp}`); + const testCases = this.collectAllTestCases(); + return testCases.find((item) => item.id === vsId); + } + + /** + * Check if a TestItem is still valid (exists in the TestController tree) + * + * Time Complexity: O(depth) where depth is the maximum nesting level of the test tree. + * In most cases this is O(1) to O(3) since test trees are typically shallow. + */ + private isTestItemValid(testItem: TestItem): boolean { + // Simple validation: check if the item's parent chain leads back to the controller + let current: TestItem | undefined = testItem; + while (current?.parent) { + current = current.parent; + } + + // If we reached a root item, check if it's in the controller + if (current) { + return this.testController.items.get(current.id) === current; + } + + // If no parent chain, check if it's directly in the controller + return this.testController.items.get(testItem.id) === testItem; + } + + /** + * Clean up stale test item references from the cache maps. + * Validates cached items and removes any that are no longer in the test tree. + */ + public cleanupStaleReferences(): void { + const staleRunIds: string[] = []; + + // Check all runId->TestItem mappings + this.runIdToTestItem.forEach((testItem, runId) => { + if (!this.isTestItemValid(testItem)) { + staleRunIds.push(runId); + } + }); + + // Remove stale entries + staleRunIds.forEach((runId) => { + const vsId = this.runIdToVSid.get(runId); + this.runIdToTestItem.delete(runId); + this.runIdToVSid.delete(runId); + if (vsId) { + this.vsIdToRunId.delete(vsId); + } + }); + + if (staleRunIds.length > 0) { + traceVerbose(`Cleaned up ${staleRunIds.length} stale test item references`); + } + } + + /** + * Handle test items that errored during execution. + * Extracts error details, finds the corresponding TestItem, and reports the error to VS Code's Test Explorer. + */ + private handleTestError(keyTemp: string, testItem: any, runInstance: TestRun): void { + const rawTraceback = testItem.traceback ?? ''; + const traceback = splitLines(rawTraceback, { + trim: false, + removeEmptyEntries: true, + }).join('\r\n'); + const text = `${testItem.test} failed with error: ${testItem.message ?? 
testItem.outcome}\r\n${traceback}`; + const message = new TestMessage(text); + + const foundItem = this.findTestItemByIdEfficient(keyTemp); + + if (foundItem?.uri) { + if (foundItem.range) { + message.location = new Location(foundItem.uri, foundItem.range); + } + runInstance.errored(foundItem, message); + } + } + + /** + * Handle test items that failed during execution + */ + private handleTestFailure(keyTemp: string, testItem: any, runInstance: TestRun): void { + const rawTraceback = testItem.traceback ?? ''; + const traceback = splitLines(rawTraceback, { + trim: false, + removeEmptyEntries: true, + }).join('\r\n'); + + const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}`; + const message = new TestMessage(text); + + const foundItem = this.findTestItemByIdEfficient(keyTemp); + + if (foundItem?.uri) { + if (foundItem.range) { + message.location = new Location(foundItem.uri, foundItem.range); + } + runInstance.failed(foundItem, message); + } + } + + /** + * Handle test items that passed during execution + */ + private handleTestSuccess(keyTemp: string, runInstance: TestRun): void { + const grabTestItem = this.runIdToTestItem.get(keyTemp); + + if (grabTestItem !== undefined) { + const foundItem = this.findTestItemByIdEfficient(keyTemp); + if (foundItem?.uri) { + runInstance.passed(grabTestItem); + } + } + } + + /** + * Handle test items that were skipped during execution + */ + private handleTestSkipped(keyTemp: string, runInstance: TestRun): void { + const grabTestItem = this.runIdToTestItem.get(keyTemp); + + if (grabTestItem !== undefined) { + const foundItem = this.findTestItemByIdEfficient(keyTemp); + if (foundItem?.uri) { + runInstance.skipped(grabTestItem); + } + } + } + + /** + * Handle subtest failures + */ + private handleSubtestFailure(keyTemp: string, testItem: any, runInstance: TestRun): void { + const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); + const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); + + if (parentTestItem) { + const subtestStats = this.subTestStats.get(parentTestCaseId); + if (subtestStats) { + subtestStats.failed += 1; + } else { + this.subTestStats.set(parentTestCaseId, { + failed: 1, + passed: 0, + }); + clearAllChildren(parentTestItem); + } + + const subTestItem = this.testController?.createTestItem(subtestId, subtestId, parentTestItem.uri); + + if (subTestItem) { + const traceback = testItem.traceback ?? ''; + const text = `${testItem.subtest} failed: ${testItem.message ?? 
testItem.outcome}\r\n${traceback}`; + parentTestItem.children.add(subTestItem); + runInstance.started(subTestItem); + const message = new TestMessage(text); + if (parentTestItem.uri && parentTestItem.range) { + message.location = new Location(parentTestItem.uri, parentTestItem.range); + } + runInstance.failed(subTestItem, message); + } else { + throw new Error('Unable to create new child node for subtest'); + } + } else { + throw new Error('Parent test item not found'); + } + } + + /** + * Handle subtest successes + */ + private handleSubtestSuccess(keyTemp: string, runInstance: TestRun): void { + const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); + const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); + + if (parentTestItem) { + const subtestStats = this.subTestStats.get(parentTestCaseId); + if (subtestStats) { + subtestStats.passed += 1; + } else { + this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 }); + clearAllChildren(parentTestItem); + } + + const subTestItem = this.testController?.createTestItem(subtestId, subtestId, parentTestItem.uri); + + if (subTestItem) { + parentTestItem.children.add(subTestItem); + runInstance.started(subTestItem); + runInstance.passed(subTestItem); + } else { + throw new Error('Unable to create new child node for subtest'); + } + } else { + throw new Error('Parent test item not found'); + } + } + + /** + * Process test execution results and update VS Code's Test Explorer with outcomes. + * Uses efficient lookup methods to handle large numbers of test results. + */ public _resolveExecution(payload: ExecutionTestPayload, runInstance: TestRun): void { const rawTestExecData = payload as ExecutionTestPayload; if (rawTestExecData !== undefined && rawTestExecData.result !== undefined) { - // Map which holds the subtest information for each test item. - - // iterate through payload and update the UI accordingly. for (const keyTemp of Object.keys(rawTestExecData.result)) { - const testCases: TestItem[] = []; - - // grab leaf level test items - this.testController.items.forEach((i) => { - const tempArr: TestItem[] = getTestCaseNodes(i); - testCases.push(...tempArr); - }); const testItem = rawTestExecData.result[keyTemp]; + // Delegate to specific outcome handlers using efficient lookups if (testItem.outcome === 'error') { - const rawTraceback = testItem.traceback ?? ''; - const traceback = splitLines(rawTraceback, { - trim: false, - removeEmptyEntries: true, - }).join('\r\n'); - const text = `${testItem.test} failed with error: ${ - testItem.message ?? testItem.outcome - }\r\n${traceback}`; - const message = new TestMessage(text); - - const grabVSid = this.runIdToVSid.get(keyTemp); - // search through freshly built array of testItem to find the failed test and update UI. - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - if (indiItem.range) { - message.location = new Location(indiItem.uri, indiItem.range); - } - runInstance.errored(indiItem, message); - } - } - }); + this.handleTestError(keyTemp, testItem, runInstance); } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') { - const rawTraceback = testItem.traceback ?? ''; - const traceback = splitLines(rawTraceback, { - trim: false, - removeEmptyEntries: true, - }).join('\r\n'); - - const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}`; - const message = new TestMessage(text); - - // note that keyTemp is a runId for unittest library... 
- const grabVSid = this.runIdToVSid.get(keyTemp); - // search through freshly built array of testItem to find the failed test and update UI. - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - if (indiItem.range) { - message.location = new Location(indiItem.uri, indiItem.range); - } - runInstance.failed(indiItem, message); - } - } - }); + this.handleTestFailure(keyTemp, testItem, runInstance); } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') { - const grabTestItem = this.runIdToTestItem.get(keyTemp); - const grabVSid = this.runIdToVSid.get(keyTemp); - if (grabTestItem !== undefined) { - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - runInstance.passed(grabTestItem); - } - } - }); - } + this.handleTestSuccess(keyTemp, runInstance); } else if (testItem.outcome === 'skipped') { - const grabTestItem = this.runIdToTestItem.get(keyTemp); - const grabVSid = this.runIdToVSid.get(keyTemp); - if (grabTestItem !== undefined) { - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - runInstance.skipped(grabTestItem); - } - } - }); - } + this.handleTestSkipped(keyTemp, runInstance); } else if (testItem.outcome === 'subtest-failure') { - // split on [] or () based on how the subtest is setup. - const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); - const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); - const data = testItem; - // find the subtest's parent test item - if (parentTestItem) { - const subtestStats = this.subTestStats.get(parentTestCaseId); - if (subtestStats) { - subtestStats.failed += 1; - } else { - this.subTestStats.set(parentTestCaseId, { - failed: 1, - passed: 0, - }); - // clear since subtest items don't persist between runs - clearAllChildren(parentTestItem); - } - const subTestItem = this.testController?.createTestItem( - subtestId, - subtestId, - parentTestItem.uri, - ); - // create a new test item for the subtest - if (subTestItem) { - const traceback = data.traceback ?? ''; - const text = `${data.subtest} failed: ${ - testItem.message ?? testItem.outcome - }\r\n${traceback}`; - parentTestItem.children.add(subTestItem); - runInstance.started(subTestItem); - const message = new TestMessage(text); - if (parentTestItem.uri && parentTestItem.range) { - message.location = new Location(parentTestItem.uri, parentTestItem.range); - } - runInstance.failed(subTestItem, message); - } else { - throw new Error('Unable to create new child node for subtest'); - } - } else { - throw new Error('Parent test item not found'); - } + this.handleSubtestFailure(keyTemp, testItem, runInstance); } else if (testItem.outcome === 'subtest-success') { - // split on [] or () based on how the subtest is setup. 
- const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); - const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); - - // find the subtest's parent test item - if (parentTestItem) { - const subtestStats = this.subTestStats.get(parentTestCaseId); - if (subtestStats) { - subtestStats.passed += 1; - } else { - this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 }); - // clear since subtest items don't persist between runs - clearAllChildren(parentTestItem); - } - const subTestItem = this.testController?.createTestItem( - subtestId, - subtestId, - parentTestItem.uri, - ); - // create a new test item for the subtest - if (subTestItem) { - parentTestItem.children.add(subTestItem); - runInstance.started(subTestItem); - runInstance.passed(subTestItem); - } else { - throw new Error('Unable to create new child node for subtest'); - } - } else { - throw new Error('Parent test item not found'); - } + this.handleSubtestSuccess(keyTemp, runInstance); } } } diff --git a/extensions/positron-python/src/test/testing/common/testingAdapter.test.ts b/extensions/positron-python/src/test/testing/common/testingAdapter.test.ts index dcd78dc23dba..97c04d5dfdf1 100644 --- a/extensions/positron-python/src/test/testing/common/testingAdapter.test.ts +++ b/extensions/positron-python/src/test/testing/common/testingAdapter.test.ts @@ -9,7 +9,11 @@ import * as fs from 'fs'; import * as os from 'os'; import * as sinon from 'sinon'; import { PytestTestDiscoveryAdapter } from '../../../client/testing/testController/pytest/pytestDiscoveryAdapter'; -import { ITestController, ITestResultResolver } from '../../../client/testing/testController/common/types'; +import { + ITestController, + ITestResultResolver, + ExecutionTestPayload, +} from '../../../client/testing/testController/common/types'; import { IPythonExecutionFactory } from '../../../client/common/process/types'; import { IConfigurationService } from '../../../client/common/types'; import { IServiceContainer } from '../../../client/ioc/types'; @@ -1033,4 +1037,206 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(failureOccurred, false, failureMsg); }); }); + + test('_resolveExecution performance test: validates efficient test result processing', async () => { + // This test validates that _resolveExecution processes test results efficiently + // without expensive tree rebuilding or linear searching operations. + // + // The test ensures that processing many test results (like parameterized tests) + // remains fast and doesn't cause performance issues or stack overflow. 
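+        // Efficiency is asserted by spying on getTestCaseNodes below: any fallback
+        // to full tree traversal shows up in the treeRebuildCount/search counters.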
+
+        // ================================================================
+        // SETUP: Initialize test environment and tracking variables
+        // ================================================================
+        resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
+
+        // Performance tracking variables
+        let totalCallTime = 0;
+        let callCount = 0;
+        const callTimes: number[] = [];
+        let treeRebuildCount = 0;
+        let totalSearchOperations = 0;
+
+        // Test configuration - Moderate scale to validate efficiency
+        const numTestFiles = 5; // Multiple test files
+        const testFunctionsPerFile = 10; // Test functions per file
+        const totalTestItems = numTestFiles * testFunctionsPerFile; // Total test items in mock tree
+        const numParameterizedResults = 15; // Number of parameterized test results to process
+
+        // ================================================================
+        // MOCK: Set up spies and function wrapping to track performance
+        // ================================================================
+
+        // Mock getTestCaseNodes to track expensive tree operations
+        const originalGetTestCaseNodes = require('../../../client/testing/testController/common/testItemUtilities')
+            .getTestCaseNodes;
+        const getTestCaseNodesSpy = sinon.stub().callsFake((item) => {
+            treeRebuildCount++;
+            const result = originalGetTestCaseNodes(item);
+            // Track search operations through tree items
+            // Safely handle undefined results
+            if (result && Array.isArray(result)) {
+                totalSearchOperations += result.length;
+            }
+            return result || []; // Return empty array if undefined
+        });
+
+        // Replace the real function with our spy
+        const testItemUtilities = require('../../../client/testing/testController/common/testItemUtilities');
+        testItemUtilities.getTestCaseNodes = getTestCaseNodesSpy;
+
+        // Wrap the _resolveExecution function to measure performance
+        const original_resolveExecution = resultResolver._resolveExecution.bind(resultResolver);
+        resultResolver._resolveExecution = async (payload, runInstance) => {
+            const startTime = performance.now();
+            callCount++;
+
+            // Call the actual implementation
+            await original_resolveExecution(payload, runInstance);
+
+            const endTime = performance.now();
+            const callTime = endTime - startTime;
+            callTimes.push(callTime);
+            totalCallTime += callTime;
+
+            return Promise.resolve();
+        };
+
+        // ================================================================
+        // SETUP: Create test data that simulates realistic test scenarios
+        // ================================================================
+
+        // Create a mock TestController with the methods we need
+        const mockTestController = {
+            items: new Map(),
+            createTestItem: (id: string, label: string, uri?: Uri) => {
+                const childrenMap = new Map();
+                // Add forEach method to children map to simulate TestItemCollection
+                (childrenMap as any).forEach = function (callback: (item: any) => void) {
+                    Map.prototype.forEach.call(this, callback);
+                };
+
+                const mockTestItem = {
+                    id,
+                    label,
+                    uri,
+                    children: childrenMap,
+                    parent: undefined,
+                    canResolveChildren: false,
+                    tags: [{ id: 'python-run' }, { id: 'python-debug' }],
+                };
+                return mockTestItem;
+            },
+            // Add a forEach method to simulate the problematic iteration
+            forEach: function (callback: (item: any) => void) {
+                this.items.forEach(callback);
+            },
+        };
+
+        // Replace the testController in our resolver
+        (resultResolver as any).testController = mockTestController;
+
+        // Create test controller with many test items (simulates real workspace)
+        for (let i = 0; i < numTestFiles; i++) {
+            const testItem = mockTestController.createTestItem(
+                `test_file_${i}`,
+                `Test File ${i}`,
+                Uri.file(`/test_${i}.py`),
+            );
+            mockTestController.items.set(`test_file_${i}`, testItem);
+
+            // Add child test items to each file
+            for (let j = 0; j < testFunctionsPerFile; j++) {
+                const childItem = mockTestController.createTestItem(
+                    `test_${i}_${j}`,
+                    `test_method_${j}`,
+                    Uri.file(`/test_${i}.py`),
+                );
+                testItem.children.set(`test_${i}_${j}`, childItem);
+
+                // Set up the ID mappings that the resolver uses
+                resultResolver.runIdToTestItem.set(`test_${i}_${j}`, childItem as any);
+                resultResolver.runIdToVSid.set(`test_${i}_${j}`, `test_${i}_${j}`);
+                resultResolver.vsIdToRunId.set(`test_${i}_${j}`, `test_${i}_${j}`);
+            }
+        }
+
+        // Create payload with multiple test results (simulates real test execution)
+        const testResults: Record<string, any> = {};
+        for (let i = 0; i < numParameterizedResults; i++) {
+            testResults[`test_0_${i % 20}`] = {
+                test: `test_method[${i}]`,
+                outcome: 'success',
+                message: null,
+                traceback: null,
+                subtest: null,
+            };
+        }
+
+        const payload: ExecutionTestPayload = {
+            cwd: '/test',
+            status: 'success' as const,
+            error: '',
+            result: testResults,
+        };
+
+        const mockRunInstance = {
+            passed: sinon.stub(),
+            failed: sinon.stub(),
+            errored: sinon.stub(),
+            skipped: sinon.stub(),
+        };
+
+        // ================================================================
+        // EXECUTION: Run the performance test
+        // ================================================================
+
+        const overallStartTime = performance.now();
+
+        // Run the _resolveExecution function with test data
+        await resultResolver._resolveExecution(payload, mockRunInstance as any);
+
+        const overallEndTime = performance.now();
+        const totalTime = overallEndTime - overallStartTime;
+
+        // ================================================================
+        // CLEANUP: Restore original functions
+        // ================================================================
+        testItemUtilities.getTestCaseNodes = originalGetTestCaseNodes;
+
+        // ================================================================
+        // ASSERT: Verify efficient performance characteristics
+        // ================================================================
+        console.log(`\n=== PERFORMANCE RESULTS ===`);
+        console.log(
+            `Test setup: ${numTestFiles} files × ${testFunctionsPerFile} test functions = ${totalTestItems} total items`,
+        );
+        console.log(`Total execution time: ${totalTime.toFixed(2)}ms`);
+        console.log(`Tree operations performed: ${treeRebuildCount}`);
+        console.log(`Search operations: ${totalSearchOperations}`);
+        console.log(`Average time per call: ${(totalCallTime / callCount).toFixed(2)}ms`);
+        console.log(`Results processed: ${numParameterizedResults}`);
+
+        // Basic function call verification
+        assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once');
+
+        // EFFICIENCY VERIFICATION: Ensure minimal expensive operations
+        assert.strictEqual(
+            treeRebuildCount,
+            0,
+            'Expected ZERO tree rebuilds - efficient implementation should use cached lookups',
+        );
+
+        assert.strictEqual(
+            totalSearchOperations,
+            0,
+            'Expected ZERO linear search operations - efficient implementation should use direct lookups',
+        );
+
+        // Performance threshold verification - should be fast
+        assert.ok(totalTime < 100, `Function should complete quickly, took ${totalTime}ms (should be under 100ms)`);
+
+        // Scalability check - time should not grow significantly with more results
+        const timePerResult = totalTime / numParameterizedResults;
+        assert.ok(
+            timePerResult < 10,
+            `Time per result should be minimal: ${timePerResult.toFixed(2)}ms per result (should be under 10ms)`,
+        );
+    });
 });
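
Note on the result-resolver refactor above: each inline outcome branch is replaced with a named helper (`handleTestFailure`, `handleTestSuccess`, `handleTestSkipped`, `handleSubtestFailure`, `handleSubtestSuccess`), but the helper bodies fall outside these hunks. Below is a minimal sketch of what the non-subtest helpers might look like, assuming they keep the resolver's existing `runIdToTestItem` cache and replace the removed linear `testCases.forEach` scans with direct map lookups; the signatures are inferred from the call sites, and `ResultResolverSketch` plus the payload shape are illustrative rather than the PR's actual code.

```typescript
// Hypothetical reconstruction from the call sites in this diff -- not the PR's actual code.
// Assumes the resolver class already holds runIdToTestItem: Map<string, TestItem>.
import { Location, TestItem, TestMessage, TestRun } from 'vscode';

class ResultResolverSketch {
    private runIdToTestItem = new Map<string, TestItem>();

    // O(1) map lookup replaces the removed O(n) scan over testCases.
    private handleTestSuccess(keyTemp: string, runInstance: TestRun): void {
        const grabTestItem = this.runIdToTestItem.get(keyTemp);
        if (grabTestItem?.uri) {
            runInstance.passed(grabTestItem);
        }
    }

    private handleTestSkipped(keyTemp: string, runInstance: TestRun): void {
        const grabTestItem = this.runIdToTestItem.get(keyTemp);
        if (grabTestItem?.uri) {
            runInstance.skipped(grabTestItem);
        }
    }

    // rawOutcome mirrors the payload entry passed as `testItem` at the call site.
    private handleTestFailure(
        keyTemp: string,
        rawOutcome: { message?: string; traceback?: string },
        runInstance: TestRun,
    ): void {
        const grabTestItem = this.runIdToTestItem.get(keyTemp);
        if (grabTestItem?.uri) {
            const message = new TestMessage(`${rawOutcome.message ?? ''}\r\n${rawOutcome.traceback ?? ''}`);
            if (grabTestItem.range) {
                message.location = new Location(grabTestItem.uri, grabTestItem.range);
            }
            runInstance.failed(grabTestItem, message);
        }
    }
}
```

This O(1)-per-result shape is exactly what the new performance test asserts: zero `getTestCaseNodes` calls (no tree rebuilds) and zero linear search operations per processed result.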
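The subtest helpers, by contrast, appear to simply relocate the deleted inline logic. A sketch of `handleSubtestFailure` under that assumption, written as another method of the sketch class above; it leans on the extension's own `splitTestNameWithRegex` and `clearAllChildren` utilities and the `subTestStats` and `testController` fields, all of which are visible in the removed lines.

```typescript
// Hypothetical relocation of the inline subtest-failure logic deleted above.
// Assumes splitTestNameWithRegex, clearAllChildren, this.subTestStats, and
// this.testController exist exactly as in the removed code.
private handleSubtestFailure(
    keyTemp: string,
    data: { message?: string; outcome?: string; traceback?: string; subtest?: string },
    runInstance: TestRun,
): void {
    // split on [] or () based on how the subtest is set up.
    const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
    const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
    if (!parentTestItem) {
        throw new Error('Parent test item not found');
    }
    const subtestStats = this.subTestStats.get(parentTestCaseId);
    if (subtestStats) {
        subtestStats.failed += 1;
    } else {
        this.subTestStats.set(parentTestCaseId, { failed: 1, passed: 0 });
        // clear since subtest items don't persist between runs
        clearAllChildren(parentTestItem);
    }
    const subTestItem = this.testController?.createTestItem(subtestId, subtestId, parentTestItem.uri);
    if (!subTestItem) {
        throw new Error('Unable to create new child node for subtest');
    }
    parentTestItem.children.add(subTestItem);
    runInstance.started(subTestItem);
    const message = new TestMessage(
        `${data.subtest} failed: ${data.message ?? data.outcome}\r\n${data.traceback ?? ''}`,
    );
    if (parentTestItem.uri && parentTestItem.range) {
        message.location = new Location(parentTestItem.uri, parentTestItem.range);
    }
    runInstance.failed(subTestItem, message);
}
```

Turning the two `throw` branches into guard clauses flattens the nesting of the removed code without changing its behavior.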