Skip to content

Commit ab31007

Browse files
authored
Fix typo: rename ExecutorTorch to ExecuTorch
Differential Revision: D83006323 Pull Request resolved: #14491
1 parent 7e228ee commit ab31007

File tree

15 files changed

+64
-66
lines changed

15 files changed

+64
-66
lines changed

.ci/scripts/test-cuda-build.sh

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,14 @@ set -exu
99

1010
CUDA_VERSION=${1:-"12.6"}
1111

12-
echo "=== Testing ExecutorTorch CUDA ${CUDA_VERSION} Build ==="
12+
echo "=== Testing ExecuTorch CUDA ${CUDA_VERSION} Build ==="
1313

14-
# Function to build and test ExecutorTorch with CUDA support
14+
# Function to build and test ExecuTorch with CUDA support
1515
test_executorch_cuda_build() {
1616
local cuda_version=$1
1717

18-
echo "Building ExecutorTorch with CUDA ${cuda_version} support..."
19-
echo "ExecutorTorch will automatically detect CUDA and install appropriate PyTorch wheel"
18+
echo "Building ExecuTorch with CUDA ${cuda_version} support..."
19+
echo "ExecuTorch will automatically detect CUDA and install appropriate PyTorch wheel"
2020

2121
# Check available resources before starting
2222
echo "=== System Information ==="
@@ -27,11 +27,11 @@ test_executorch_cuda_build() {
2727
nvcc --version || echo "nvcc not found"
2828
nvidia-smi || echo "nvidia-smi not found"
2929

30-
# Set CMAKE_ARGS to enable CUDA build - ExecutorTorch will handle PyTorch installation automatically
30+
# Set CMAKE_ARGS to enable CUDA build - ExecuTorch will handle PyTorch installation automatically
3131
export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
3232

33-
echo "=== Starting ExecutorTorch Installation ==="
34-
# Install ExecutorTorch with CUDA support with timeout and error handling
33+
echo "=== Starting ExecuTorch Installation ==="
34+
# Install ExecuTorch with CUDA support with timeout and error handling
3535
timeout 5400 ./install_executorch.sh || {
3636
local exit_code=$?
3737
echo "ERROR: install_executorch.sh failed with exit code: $exit_code"
@@ -41,15 +41,15 @@ test_executorch_cuda_build() {
4141
exit $exit_code
4242
}
4343

44-
echo "SUCCESS: ExecutorTorch CUDA build completed"
44+
echo "SUCCESS: ExecuTorch CUDA build completed"
4545

4646
# Verify the installation
47-
echo "=== Verifying ExecutorTorch CUDA Installation ==="
47+
echo "=== Verifying ExecuTorch CUDA Installation ==="
4848

49-
# Test that ExecutorTorch was built successfully
49+
# Test that ExecuTorch was built successfully
5050
python -c "
5151
import executorch
52-
print('SUCCESS: ExecutorTorch imported successfully')
52+
print('SUCCESS: ExecuTorch imported successfully')
5353
"
5454

5555
# Test CUDA availability and show details
@@ -60,7 +60,7 @@ try:
6060
print('INFO: CUDA available:', torch.cuda.is_available())
6161
6262
if torch.cuda.is_available():
63-
print('SUCCESS: CUDA is available for ExecutorTorch')
63+
print('SUCCESS: CUDA is available for ExecuTorch')
6464
print('INFO: CUDA version:', torch.version.cuda)
6565
print('INFO: GPU device count:', torch.cuda.device_count())
6666
print('INFO: Current GPU device:', torch.cuda.current_device())
@@ -74,16 +74,16 @@ try:
7474
print('SUCCESS: CUDA tensor operation completed on device:', z.device)
7575
print('INFO: Result tensor shape:', z.shape)
7676
77-
print('SUCCESS: ExecutorTorch CUDA integration verified')
77+
print('SUCCESS: ExecuTorch CUDA integration verified')
7878
else:
79-
print('WARNING: CUDA not detected, but ExecutorTorch built successfully')
79+
print('WARNING: CUDA not detected, but ExecuTorch built successfully')
8080
exit(1)
8181
except Exception as e:
82-
print('ERROR: ExecutorTorch CUDA test failed:', e)
82+
print('ERROR: ExecuTorch CUDA test failed:', e)
8383
exit(1)
8484
"
8585

86-
echo "SUCCESS: ExecutorTorch CUDA ${cuda_version} build and verification completed successfully"
86+
echo "SUCCESS: ExecuTorch CUDA ${cuda_version} build and verification completed successfully"
8787
}
8888

8989
# Main execution

backends/cortex_m/test/test_quantize_op_fusion_pass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -313,7 +313,7 @@ def forward(self, x, y):
313313
# Apply passes
314314
transformed_program = self._apply_passes(edge_program)
315315

316-
# Generate ExecutorTorch program
316+
# Generate ExecuTorch program
317317
executorch_program = transformed_program.to_executorch()
318318

319319
# Verify the program contains the expected fused operator

backends/vulkan/test/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -303,13 +303,13 @@ def run_and_check_output(
303303
Returns:
304304
bool: True if outputs match within tolerance, False otherwise
305305
"""
306-
# Load the ExecutorTorch program
306+
# Load the ExecuTorch program
307307
executorch_module = _load_for_executorch_from_buffer(executorch_program.buffer)
308308

309309
# Flatten inputs for execution
310310
inputs_flattened, _ = tree_flatten(sample_inputs)
311311

312-
# Run the ExecutorTorch program
312+
# Run the ExecuTorch program
313313
model_output = executorch_module.run_method("forward", tuple(inputs_flattened))
314314

315315
# Generate reference outputs using the reference model

devtools/scripts/profile_model.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
#!/bin/bash
99

10-
# ExecutorTorch Model Profiling Script
10+
# ExecuTorch Model Profiling Script
1111
#
1212
# This script automates the process of building executor_runner with profiling enabled,
1313
# running model inference with ETDump collection, and generating CSV profiling reports.

docs/source/llm/export-llm.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ Instead of needing to manually write code to call torch.export(), use ExecuTorch
44

55
## Prerequisites
66

7-
The LLM export functionality requires the `pytorch_tokenizers` package. If you encounter a `ModuleNotFoundError: No module named 'pytorch_tokenizers'` error, install it from the ExecutorTorch source code:
7+
The LLM export functionality requires the `pytorch_tokenizers` package. If you encounter a `ModuleNotFoundError: No module named 'pytorch_tokenizers'` error, install it from the ExecuTorch source code:
88

99
```bash
1010
pip install -e ./extension/llm/tokenizers/

docs/source/ptd-file-format.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ The flatbuffer-encoded metadata follows the headers and contains:
111111
### Tensor Layout
112112

113113
If a data segment contains a canonical tensor, it may have associated layout information:
114-
- **Scalar type**: Data type (float32, int32, etc.) using ExecutorTorch scalar types.
114+
- **Scalar type**: Data type (float32, int32, etc.) using ExecuTorch scalar types.
115115
- **Sizes**: Dimensions of the tensor.
116116
- **Dim order**: Memory layout order specifying how dimensions are arranged in memory.
117117

docs/source/using-executorch-faqs.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ if you are using Ubuntu, or use an equivalent install command.
1616

1717
### ModuleNotFoundError: No module named 'pytorch_tokenizers'
1818

19-
The `pytorch_tokenizers` package is required for LLM export functionality. Install it from the ExecutorTorch source code:
19+
The `pytorch_tokenizers` package is required for LLM export functionality. Install it from the ExecuTorch source code:
2020
```
2121
pip install -e ./extension/llm/tokenizers/
2222
```

examples/vulkan/export.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ def main() -> None:
210210
else:
211211
logging.error("✗ Model test FAILED - outputs do not match reference")
212212
raise RuntimeError(
213-
"Model validation failed: ExecutorTorch outputs do not match reference model outputs"
213+
"Model validation failed: ExecuTorch outputs do not match reference model outputs"
214214
)
215215

216216
if args.bundled:

exir/emit/test/test_emit.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1920,7 +1920,7 @@ def forward(self, x):
19201920
program_buffer = et_program.buffer
19211921
et_module = _load_for_executorch_from_buffer(program_buffer)
19221922
for _, (inp, expected) in enumerate(zip(test_inputs, reference_outputs)):
1923-
# Execute with ExecutorTorch
1923+
# Execute with ExecuTorch
19241924
et_output = et_module.forward([inp])
19251925
et_result = et_output[0] # Get first output
19261926
# Compare results

extension/audio/mel_spectrogram.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ def export_processor(model=None, output_file="whisper_preprocess.pte"):
213213

214214
def main():
215215
parser = argparse.ArgumentParser(
216-
description="Export WhisperAudioProcessor to ExecutorTorch"
216+
description="Export WhisperAudioProcessor to ExecuTorch"
217217
)
218218
parser.add_argument(
219219
"--feature_size",

0 commit comments

Comments
 (0)