Commit c84f57a

Run Python unit test CI on Windows
ghstack-source-id: 6beaa7b ghstack-comment-id: 3226055310 Pull-Request: #13716
1 parent 1520f9f commit c84f57a

15 files changed: +212 −25 lines

.ci/scripts/unittest-windows.ps1

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+param (
+    [string]$editable
+)
+
+Set-PSDebug -Trace 1
+$ErrorActionPreference = 'Stop'
+$PSNativeCommandUseErrorActionPreference = $true
+
+conda create --yes --quiet -n et python=3.12
+conda activate et
+
+# Activate the VS environment - this is required for Dynamo to work, as it uses MSVC.
+# There are a bunch of environment variables that it requires.
+# See https://learn.microsoft.com/en-us/cpp/build/building-on-the-command-line.
+& "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\Common7\Tools\Launch-VsDevShell.ps1" -Arch amd64
+
+# Install test dependencies
+pip install -r .ci/docker/requirements-ci.txt
+
+if ($editable -eq 'true') {
+    install_executorch.bat --editable
+} else {
+    install_executorch.bat
+}
+if ($LASTEXITCODE -ne 0) {
+    Write-Host "Installation was unsuccessful. Exit code: $LASTEXITCODE."
+    exit $LASTEXITCODE
+}
+
+# Run pytest with coverage
+# pytest -n auto --cov=./ --cov-report=xml
+pytest --continue-on-collection-errors -v --full-trace -c pytest-windows.ini -n auto
+if ($LASTEXITCODE -ne 0) {
+    Write-Host "Pytest invocation was unsuccessful. Exit code: $LASTEXITCODE."
+    exit $LASTEXITCODE
+}
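
The script comments above note that the Visual Studio developer environment must be active before Dynamo can compile anything, since it drives MSVC. As an aside (not part of this commit), a minimal Python sketch of a pre-flight check, assuming Launch-VsDevShell.ps1 puts the MSVC driver cl.exe on PATH:

# Illustration only, not from the diff: verify the MSVC toolchain is reachable
# before kicking off the Windows test run.
import shutil
import sys

def msvc_available() -> bool:
    # cl.exe only appears on PATH after the VS developer shell is applied.
    return shutil.which("cl") is not None

if __name__ == "__main__":
    if sys.platform == "win32" and not msvc_available():
        sys.exit("cl.exe not found on PATH; run Launch-VsDevShell.ps1 first.")
    print("MSVC toolchain found.")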

.github/workflows/_unittest.yml

Lines changed: 12 additions & 0 deletions
@@ -19,6 +19,7 @@ on:
         required: false
         type: string
         description: Install ExecuTorch in editable mode or not.
+        default: 'false'
       python-version:
         required: false
         type: string
@@ -52,3 +53,14 @@
         # This is needed to get the prebuilt PyTorch wheel from S3
         ${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
         .ci/scripts/unittest-macos.sh --build-tool "${{ inputs.build-tool }}" --build-mode "${{ inputs.build-mode }}" --editable "${{ inputs.editable }}"
+
+  windows:
+    if: ${{ inputs.build-tool == 'cmake' }}
+    uses: pytorch/test-infra/.github/workflows/windows_job.yml@main
+    with:
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 120
+      script: |
+        conda init powershell
+        powershell .ci/scripts/unittest-windows.ps1 -editable "${{ inputs.editable }}"

.gitignore

Lines changed: 4 additions & 0 deletions
@@ -65,3 +65,7 @@ xcuserdata/
 
 # Android
 *.aar
+
+# Windows
+*.dll
+*.pyd
Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import sys
+
+try:
+    import pytest
+
+    # Skip on Windows
+    if sys.platform == "win32":
+        pytest.skip("Core ML is not available on Windows.", allow_module_level=True)
+
+except ImportError:
+    pass
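
For context (not part of the diff): raising pytest.skip with allow_module_level=True at import time makes pytest mark every test in the module as skipped during collection, rather than erroring out on a Windows-only import failure. A self-contained sketch of the same pattern, using a hypothetical test module:

# Hypothetical test module illustrating module-level skipping (illustration only).
import sys

import pytest

if sys.platform == "win32":
    # Raised at import time, so the whole module is reported as skipped
    # instead of failing collection on Windows.
    pytest.skip("This backend is not available on Windows.", allow_module_level=True)


def test_backend_smoke():
    # Only collected and run on non-Windows platforms.
    assert True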

backends/xnnpack/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ endforeach()
 if(WIN32 AND NOT CMAKE_CROSSCOMPILING)
   set(MV_COMMAND
     powershell -Command
-    "Move-Item -Path ${_xnnpack_flatbuffer__outputs} -Destination ${_xnnpack_schema__outputs}"
+    "Move-Item -Path ${_xnnpack_flatbuffer__outputs} -Destination ${_xnnpack_schema__outputs} -Force"
   )
 else()
   set(MV_COMMAND mv ${_xnnpack_flatbuffer__outputs} ${_xnnpack_schema__outputs})
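
The only change here is appending -Force, presumably so that Move-Item can overwrite a generated header left behind by a previous build instead of failing the rebuild. For illustration only, the closest Python analogue of that overwrite-on-move behavior is os.replace, which clobbers an existing destination on both POSIX and Windows:

# Illustration only (not from this commit): os.replace mirrors Move-Item -Force.
import os

def move_generated_header(src: str, dst: str) -> None:
    # Overwrites dst if it already exists, so incremental rebuilds do not fail.
    os.replace(src, dst)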

examples/models/llama/source_transformation/custom_kv_cache.py

Lines changed: 1 addition & 1 deletion
@@ -269,7 +269,7 @@ def replace_kv_cache_with_quantized_kv_cache(module):
     executorch_package_path = executorch.__path__[-1]
     libs = list(
         glob.glob(
-            f"{executorch_package_path}/**/libquantized_ops_aot_lib.*",
+            f"{executorch_package_path}/**/*quantized_ops_aot_lib.*",
             recursive=True,
         )
     )
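
Not part of the commit, but the reasoning behind dropping the leading lib: Windows builds name shared libraries without the lib prefix and with a .dll or .pyd suffix (the same extensions added to .gitignore above), so the stricter pattern never matched them. A quick sketch of what each pattern accepts, using fnmatch on hypothetical base names:

# Illustration only: hypothetical file names showing why the pattern was relaxed.
from fnmatch import fnmatch

candidates = [
    "libquantized_ops_aot_lib.so",     # Linux
    "libquantized_ops_aot_lib.dylib",  # macOS
    "quantized_ops_aot_lib.dll",       # Windows, no "lib" prefix
    "quantized_ops_aot_lib.pyd",       # Windows Python extension
]

old_pattern = "libquantized_ops_aot_lib.*"
new_pattern = "*quantized_ops_aot_lib.*"

for name in candidates:
    print(f"{name}: old={fnmatch(name, old_pattern)} new={fnmatch(name, new_pattern)}")
# The old pattern misses both Windows names; the new one matches all four.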

exir/tracer.py

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@
 from torch._decomp import get_decompositions
 from torch._dynamo.guards import Guard
 from torch._functorch.eager_transforms import _maybe_unwrap_functional_tensor
+
 from torch.export import default_decompositions
 from torch.func import functionalize
 from torch.fx.operator_schemas import normalize_function

extension/llm/custom_ops/custom_ops.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 package_path = Path(__file__).parent.resolve()
 logging.info(f"Looking for libcustom_ops_aot_lib.so in {package_path}")
 
-libs = list(package_path.glob("**/libcustom_ops_aot_lib.*"))
+libs = list(package_path.glob("**/*custom_ops_aot_lib.*"))
 
 assert len(libs) == 1, f"Expected 1 library but got {len(libs)}"
 logging.info(f"Loading custom ops library: {libs[0]}")

extension/llm/custom_ops/op_tile_crop_aot.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
     tile_crop = torch.ops.preprocess.tile_crop.default
     assert tile_crop is not None
 except:
-    libs = list(Path(__file__).parent.resolve().glob("libcustom_ops_aot_lib.*"))
+    libs = list(Path(__file__).parent.resolve().glob("*custom_ops_aot_lib.*"))
     assert len(libs) == 1, f"Expected 1 library but got {len(libs)}"
     logging.info(f"Loading custom ops library: {libs[0]}")
     torch.ops.load_library(libs[0])

install_executorch.py

Lines changed: 0 additions & 8 deletions
@@ -194,14 +194,6 @@ def main(args):
         clean()
         return
 
-    cmake_args = [os.getenv("CMAKE_ARGS", "")]
-    # Use ClangCL on Windows.
-    # ClangCL is an alias to Clang that configures it to work in an MSVC-compatible
-    # mode. Using it on Windows to avoid compiler compatibility issues for MSVC.
-    if os.name == "nt":
-        cmake_args.append("-T ClangCL")
-    os.environ["CMAKE_ARGS"] = " ".join(cmake_args)
-
     check_and_update_submodules()
     # This option is used in CI to make sure that PyTorch build from the pinned commit
     # is used instead of nightly. CI jobs wouldn't be able to catch regression from the
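
The ClangCL toolset selection is removed from the installer here. As an illustration only (an assumption, not something this commit configures), a developer who still wants ClangCL locally could set CMAKE_ARGS in the environment before invoking the installer, assuming the build continues to honor that variable:

# Illustration only: reproduce the removed behavior from outside the installer.
import os
import subprocess
import sys

if os.name == "nt":
    existing = os.environ.get("CMAKE_ARGS", "")
    # "-T ClangCL" selects the Clang toolset in MSVC-compatible mode.
    os.environ["CMAKE_ARGS"] = f"{existing} -T ClangCL".strip()

subprocess.run([sys.executable, "install_executorch.py"], check=True)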
