
Commit 62a1cb4

Merge branch 'main' into add-dtype-check-for-where
2 parents 9e99434 + ab31007

35 files changed: +203 -113 lines

.ci/scripts/setup-samsung-linux-deps.sh

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ set -ex
 
 download_ai_lite_core() {
   API_BASE="https://soc-developer.semiconductor.samsung.com/api/v1/resource/ai-litecore/download"
-  API_KEY="kn10SoSY3hkC-9Qny5TqD2mnqVrlupv3krnjLeBt5cY"
+  API_KEY=$SAMSUNG_AI_LITECORE_KEY
 
   VERSION="0.5"
   OS_NAME="Ubuntu 22.04"

.ci/scripts/test-cuda-build.sh

Lines changed: 16 additions & 16 deletions
@@ -9,14 +9,14 @@ set -exu
 
 CUDA_VERSION=${1:-"12.6"}
 
-echo "=== Testing ExecutorTorch CUDA ${CUDA_VERSION} Build ==="
+echo "=== Testing ExecuTorch CUDA ${CUDA_VERSION} Build ==="
 
-# Function to build and test ExecutorTorch with CUDA support
+# Function to build and test ExecuTorch with CUDA support
 test_executorch_cuda_build() {
   local cuda_version=$1
 
-  echo "Building ExecutorTorch with CUDA ${cuda_version} support..."
-  echo "ExecutorTorch will automatically detect CUDA and install appropriate PyTorch wheel"
+  echo "Building ExecuTorch with CUDA ${cuda_version} support..."
+  echo "ExecuTorch will automatically detect CUDA and install appropriate PyTorch wheel"
 
   # Check available resources before starting
   echo "=== System Information ==="
@@ -27,11 +27,11 @@ test_executorch_cuda_build() {
   nvcc --version || echo "nvcc not found"
   nvidia-smi || echo "nvidia-smi not found"
 
-  # Set CMAKE_ARGS to enable CUDA build - ExecutorTorch will handle PyTorch installation automatically
+  # Set CMAKE_ARGS to enable CUDA build - ExecuTorch will handle PyTorch installation automatically
   export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
 
-  echo "=== Starting ExecutorTorch Installation ==="
-  # Install ExecutorTorch with CUDA support with timeout and error handling
+  echo "=== Starting ExecuTorch Installation ==="
+  # Install ExecuTorch with CUDA support with timeout and error handling
   timeout 5400 ./install_executorch.sh || {
     local exit_code=$?
     echo "ERROR: install_executorch.sh failed with exit code: $exit_code"
@@ -41,15 +41,15 @@ test_executorch_cuda_build() {
     exit $exit_code
   }
 
-  echo "SUCCESS: ExecutorTorch CUDA build completed"
+  echo "SUCCESS: ExecuTorch CUDA build completed"
 
   # Verify the installation
-  echo "=== Verifying ExecutorTorch CUDA Installation ==="
+  echo "=== Verifying ExecuTorch CUDA Installation ==="
 
-  # Test that ExecutorTorch was built successfully
+  # Test that ExecuTorch was built successfully
   python -c "
 import executorch
-print('SUCCESS: ExecutorTorch imported successfully')
+print('SUCCESS: ExecuTorch imported successfully')
 "
 
   # Test CUDA availability and show details
@@ -60,7 +60,7 @@ try:
     print('INFO: CUDA available:', torch.cuda.is_available())
 
     if torch.cuda.is_available():
-        print('SUCCESS: CUDA is available for ExecutorTorch')
+        print('SUCCESS: CUDA is available for ExecuTorch')
         print('INFO: CUDA version:', torch.version.cuda)
         print('INFO: GPU device count:', torch.cuda.device_count())
         print('INFO: Current GPU device:', torch.cuda.current_device())
@@ -74,16 +74,16 @@ try:
         print('SUCCESS: CUDA tensor operation completed on device:', z.device)
         print('INFO: Result tensor shape:', z.shape)
 
-        print('SUCCESS: ExecutorTorch CUDA integration verified')
+        print('SUCCESS: ExecuTorch CUDA integration verified')
     else:
-        print('WARNING: CUDA not detected, but ExecutorTorch built successfully')
+        print('WARNING: CUDA not detected, but ExecuTorch built successfully')
         exit(1)
 except Exception as e:
-    print('ERROR: ExecutorTorch CUDA test failed:', e)
+    print('ERROR: ExecuTorch CUDA test failed:', e)
     exit(1)
 "
 
-  echo "SUCCESS: ExecutorTorch CUDA ${cuda_version} build and verification completed successfully"
+  echo "SUCCESS: ExecuTorch CUDA ${cuda_version} build and verification completed successfully"
 }
 
 # Main execution
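
The inline `python -c` verification above could just as well live in a standalone file. A minimal sketch of the same checks in plain Python (the file name and `main` wrapper are illustrative, not part of this commit):

```
# verify_cuda_build.py -- illustrative helper, not part of this commit
import sys

import executorch  # noqa: F401  # a successful import proves the build installed
import torch


def main() -> int:
    if not torch.cuda.is_available():
        print("WARNING: CUDA not detected, but ExecuTorch built successfully")
        return 1
    print("INFO: CUDA version:", torch.version.cuda)
    print("INFO: GPU device count:", torch.cuda.device_count())
    # A small tensor op proves the GPU is usable, not merely visible.
    x = torch.randn(8, 8, device="cuda")
    print("SUCCESS: CUDA tensor operation completed on device:", (x @ x).device)
    return 0


if __name__ == "__main__":
    sys.exit(main())
```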

.github/scripts/propose_ghstack_orig_pr.py

Lines changed: 13 additions & 0 deletions
@@ -86,6 +86,17 @@ def get_pr_stack_from_number(ref: str, repo: Repository) -> List[int]:
     return pr_stack
 
 
+def get_differential_revision(pr, repo: Repository) -> str:
+    body = repo.get_pull(pr.number).body
+    matches = re.findall(r"Differential Revision: .*", body)
+    count = len(matches)
+    if count == 1:
+        # If there's more than one Differential Revision, let's just return empty
+        # so that we can disambiguate manually.
+        return matches[0]
+    return ""
+
+
 def create_prs_for_orig_branch(pr_stack: List[int], repo: Repository):
     # For the first PR, we want to merge to `main` branch, and we will update
     # as we go through the stack
@@ -100,13 +111,15 @@ def create_prs_for_orig_branch(pr_stack: List[int], repo: Repository):
     # The PR we want to create is then "branch_to_merge" <- gh/user/x/orig
     # gh/user/x/orig is the clean diff between gh/user/x/base <- gh/user/x/head
     orig_branch_merge_head = pr.base.ref.replace("base", "orig")
+    differential_revision_text = get_differential_revision(pr, repo)
     bot_metadata = f"""This PR was created by the merge bot to help merge the original PR into the main branch.
 ghstack PR number: https://github.com/pytorch/executorch/pull/{pr.number} by @{pr.user.login}
 ^ Please use this as the source of truth for the PR details, comments, and reviews
 ghstack PR base: https://github.com/pytorch/executorch/tree/{pr.base.ref}
 ghstack PR head: https://github.com/pytorch/executorch/tree/{pr.head.ref}
 Merge bot PR base: https://github.com/pytorch/executorch/tree/{orig_branch_merge_base}
 Merge bot PR head: https://github.com/pytorch/executorch/tree/{orig_branch_merge_head}
+{differential_revision_text}
 @diff-train-skip-merge"""
 
     existing_orig_pr = repo.get_pulls(
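
A quick illustration of how the new helper behaves on different PR bodies. This is a standalone sketch of the same regex logic, minus the PyGithub plumbing; the revision IDs are made up:

```
import re


def extract_differential_revision(body: str) -> str:
    # Same matching rule as get_differential_revision above.
    matches = re.findall(r"Differential Revision: .*", body)
    if len(matches) == 1:
        return matches[0]
    # Zero or several matches: return empty so a human can disambiguate.
    return ""


print(extract_differential_revision("Fix bug.\nDifferential Revision: D12345678"))
# -> Differential Revision: D12345678
print(extract_differential_revision("Differential Revision: D1\nDifferential Revision: D2"))
# -> "" (ambiguous, left for manual handling)
```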

.github/workflows/pull.yml

Lines changed: 3 additions & 0 deletions
@@ -900,12 +900,14 @@ jobs:
     permissions:
       id-token: write
       contents: read
+    secrets: inherit
     with:
       runner: linux.2xlarge
       docker-image: ci-image:executorch-ubuntu-22.04-clang12-android
       submodules: 'recursive'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       timeout: 90
+      secrets-env: SAMSUNG_AI_LITECORE_KEY
       script: |
         set -ex
 
@@ -917,6 +919,7 @@ jobs:
         PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
 
         # Setup Samsung SDK (AI Lite Core) and install enn backend
+        export SAMSUNG_AI_LITECORE_KEY=$SECRET_SAMSUNG_AI_LITECORE_KEY
         source .ci/scripts/setup-samsung-linux-deps.sh
 
         # Test models serially
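
Together, these three additions plumb the secret through each layer: `secrets: inherit` forwards repository secrets to the reusable workflow, `secrets-env: SAMSUNG_AI_LITECORE_KEY` asks the job wrapper to expose that secret to the script (where, as the `export` line shows, it arrives prefixed with `SECRET_`), and the `export` renames it to the variable `setup-samsung-linux-deps.sh` now reads. A hypothetical fail-fast guard for that last hop, sketched in Python rather than the workflow's shell:

```
import os

# Hypothetical pre-flight check; mirrors what the export line assumes.
secret = os.environ.get("SECRET_SAMSUNG_AI_LITECORE_KEY")
if not secret:
    raise SystemExit(
        "SECRET_SAMSUNG_AI_LITECORE_KEY is unset; check that the job declares "
        "secrets-env and that the caller passes secrets: inherit."
    )
# Re-export under the name the setup script reads.
os.environ["SAMSUNG_AI_LITECORE_KEY"] = secret
```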

.github/workflows/test-cuda-builds.yml

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        cuda-version: ["12.6", "12.8", "12.9"]
+        cuda-version: ["12.6", "12.8", "13.0"]
 
     name: test-executorch-cuda-build-${{ matrix.cuda-version }}
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main

CONTRIBUTING.md

Lines changed: 1 addition & 2 deletions
@@ -199,8 +199,7 @@ We use [`lintrunner`](https://pypi.org/project/lintrunner/) to help make sure the
 code follows our standards. Set it up with:
 
 ```
-pip install lintrunner==0.12.7
-pip install lintrunner-adapters==0.12.4
+./install_requirements.sh # (automatically run by install_executorch.sh)
 lintrunner init
 ```
 

backends/cortex_m/test/test_quantize_op_fusion_pass.py

Lines changed: 1 addition & 1 deletion
@@ -313,7 +313,7 @@ def forward(self, x, y):
         # Apply passes
         transformed_program = self._apply_passes(edge_program)
 
-        # Generate ExecutorTorch program
+        # Generate ExecuTorch program
         executorch_program = transformed_program.to_executorch()
 
         # Verify the program contains the expected fused operator

backends/vulkan/test/utils.py

Lines changed: 2 additions & 2 deletions
@@ -303,13 +303,13 @@ def run_and_check_output(
     Returns:
         bool: True if outputs match within tolerance, False otherwise
     """
-    # Load the ExecutorTorch program
+    # Load the ExecuTorch program
     executorch_module = _load_for_executorch_from_buffer(executorch_program.buffer)
 
     # Flatten inputs for execution
     inputs_flattened, _ = tree_flatten(sample_inputs)
 
-    # Run the ExecutorTorch program
+    # Run the ExecuTorch program
     model_output = executorch_module.run_method("forward", tuple(inputs_flattened))
 
     # Generate reference outputs using the reference model

backends/xnnpack/runtime/XNNPACKBackend.h

Lines changed: 0 additions & 2 deletions
@@ -1,7 +1,5 @@
 #pragma once
 
-#include <executorch/runtime/platform/compiler.h>
-
 namespace executorch::backends::xnnpack {
 /// The key for the backend. This is used to register the backend, check
 /// availability, and get/set options.

backends/xnnpack/targets.bzl

Lines changed: 10 additions & 0 deletions
@@ -73,3 +73,13 @@ def define_common_targets():
         # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole)
         link_whole = True,
     )
+
+    runtime.cxx_library(
+        name = "xnnpack_interface",
+        visibility = [
+            "@EXECUTORCH_CLIENTS",
+        ],
+        exported_headers = [
+            "runtime/XNNPACKBackend.h",
+        ],
+    )
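
The new `xnnpack_interface` target exposes only the backend's public header, so clients can reference the backend key without linking the full runtime, which also fits the removal of the `compiler.h` include from `XNNPACKBackend.h` above: the header now stands alone. A hypothetical consumer, written in the same Buck/Starlark style the file itself uses; the target name, source file, and dependency label are made up for illustration:

```
runtime.cxx_library(
    name = "uses_xnnpack_key",  # hypothetical consumer, not in this commit
    srcs = ["check_xnnpack.cpp"],
    deps = [
        # Header-only dependency on the backend's public interface;
        # the exact label depends on your cell layout.
        "//executorch/backends/xnnpack:xnnpack_interface",
    ],
)
```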
