Skip to content
This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit c2caa37

Browse files
authored
Merge branch 'pytorch:main' into patch-10
2 parents c31406c + 4dc2f89 commit c2caa37

File tree

24 files changed

+1599
-269
lines changed

24 files changed

+1599
-269
lines changed

.ci/scripts/run-docs

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,9 +75,6 @@ if [ "$1" == "advanced" ]; then
7575
fi
7676

7777
if [ "$1" == "evaluation" ]; then
78-
79-
exit 0
80-
8178
echo "::group::Create script to run evaluation"
8279
python3 torchchat/utils/scripts/updown.py --file torchchat/utils/docs/evaluation.md --replace 'llama3:stories15M,-l 3:-l 2' --suppress huggingface-cli,HF_TOKEN > ./run-evaluation.sh
8380
# for good measure, if something happened to updown processor,

.github/workflows/pull.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -942,7 +942,7 @@ jobs:
942942
path: |
943943
./et-build
944944
./torchchat/utils/scripts
945-
key: et-build-${{runner.os}}-${{runner.arch}}-${{env.et-git-hash}}-${{ hashFiles('**/install_et.sh', '**/build_native.sh') }}
945+
key: et-build-${{runner.os}}-${{runner.arch}}-${{env.et-git-hash}}-${{ hashFiles('**/install_et.sh') }}
946946
- if: ${{ steps.install-et.outputs.cache-hit != 'true' }}
947947
continue-on-error: true
948948
run: |
@@ -1053,7 +1053,7 @@ jobs:
10531053
10541054
# Pull submodules (re2, abseil) for Tiktoken
10551055
git submodule sync
1056-
git submodule update --init --recursive
1056+
git submodule update --init
10571057
./runner/build_android.sh
10581058
echo "Tests complete."
10591059

.gitmodules

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,9 @@
1-
[submodule "runner/third-party/tokenizers"]
2-
path = runner/third-party/tokenizers
3-
url = https://github.com/pytorch-labs/tokenizers
1+
[submodule "tokenizer/third-party/abseil-cpp"]
2+
path = tokenizer/third-party/abseil-cpp
3+
url = https://github.com/abseil/abseil-cpp.git
4+
[submodule "tokenizer/third-party/re2"]
5+
path = tokenizer/third-party/re2
6+
url = https://github.com/google/re2.git
7+
[submodule "tokenizer/third-party/sentencepiece"]
8+
path = tokenizer/third-party/sentencepiece
9+
url = https://github.com/google/sentencepiece.git

CMakeLists.txt

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,21 +7,18 @@ ELSE()
77
ENDIF()
88

99
project(Torchchat)
10-
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes")
1110

1211
# include tokenizer
13-
add_subdirectory(runner/third-party/tokenizers)
12+
add_subdirectory(tokenizer)
1413

1514
# include et_run executable
1615
include(runner/et.cmake)
1716
if(TARGET et_run)
18-
target_link_libraries(et_run PUBLIC tokenizers microkernels-prod)
19-
target_include_directories(et_run PUBLIC runner/third-party/tokenizers/include)
17+
target_link_libraries(et_run PUBLIC tokenizer microkernels-prod)
2018
endif()
2119

2220
# include aoti_run executable
2321
include(runner/aoti.cmake)
2422
if(TARGET aoti_run)
25-
target_link_libraries(aoti_run tokenizers)
26-
target_include_directories(aoti_run PUBLIC runner/third-party/tokenizers/include)
23+
target_link_libraries(aoti_run tokenizer)
2724
endif()

README.md

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -45,16 +45,16 @@ aliases.
4545

4646
| Model | Mobile Friendly | Notes |
4747
|------------------|---|---------------------|
48-
|[meta-llama/Meta-Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct)||Tuned for `chat` . Alias to `llama3.2-3b`.|
48+
|[meta-llama/Meta-Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct)||Tuned for `chat`. Alias to `llama3.2-3b`.|
4949
|[meta-llama/Meta-Llama-3.2-3B](https://huggingface.co/meta-llama/Llama-3.2-3B)||Best for `generate`. Alias to `llama3.2-3b-base`.|
50-
|[meta-llama/Llama-Guard-3-1B](https://huggingface.co/meta-llama/Llama-Guard-3-1B)||Tuned for classification . Alias to `llama3-1b-guard`.|
51-
|[meta-llama/Meta-Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct)||Tuned for `chat` . Alias to `llama3.2-1b`.|
50+
|[meta-llama/Llama-Guard-3-1B](https://huggingface.co/meta-llama/Llama-Guard-3-1B)||Tuned for classification. Alias to `llama3-1b-guard`.|
51+
|[meta-llama/Meta-Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct)||Tuned for `chat`. Alias to `llama3.2-1b`.|
5252
|[meta-llama/Meta-Llama-3.2-1B](https://huggingface.co/meta-llama/Llama-3.2-1B)||Best for `generate`. Alias to `llama3.2-1b-base`.|
53-
|[meta-llama/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)||Multimodal (Image + Text). Tuned for `chat` . Alias to `llama3.2-11B`.|
54-
|[meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)||Multimodal (Image + Text). Tuned for `generate` . Alias to `llama3.2-11B-base`.|
55-
|[meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)||Tuned for `chat` . Alias to `llama3.1`.|
53+
|[meta-llama/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)||Multimodal (Image + Text). Tuned for `chat`. Alias to `llama3.2-11B`.|
54+
|[meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)||Multimodal (Image + Text). Tuned for `generate`. Alias to `llama3.2-11B-base`.|
55+
|[meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)||Tuned for `chat`. Alias to `llama3.1`.|
5656
|[meta-llama/Meta-Llama-3.1-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B)||Best for `generate`. Alias to `llama3.1-base`.|
57-
|[meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct)||Tuned for `chat` . Alias to `llama3`.|
57+
|[meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct)||Tuned for `chat`. Alias to `llama3`.|
5858
|[meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B)||Best for `generate`. Alias to `llama3-base`.|
5959
|[meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)||Tuned for `chat`. Alias to `llama2`.|
6060
|[meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)||Tuned for `chat`. Alias to `llama2-13b-chat`.|
@@ -231,7 +231,7 @@ python3 torchchat.py server llama3.1
231231
```
232232
[skip default]: end
233233

234-
[shell default]: python3 torchchat.py server llama3.1 & server_pid=$!
234+
[shell default]: python3 torchchat.py server llama3.1 & server_pid=$! ; sleep 90 # wait for server to be ready to accept requests
235235

236236
In another terminal, query the server using `curl`. Depending on the model configuration, this query might take a few minutes to respond.
237237

docs/ADVANCED-USERS.md

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,20 @@
11
> [!WARNING]
22
> Files in this directory may be outdated, incomplete, scratch notes, or a WIP. torchchat provides no guarantees on these files as references. Please refer to the root README for stable features and documentation.
33
4-
# Torchchat is still in pre-release!
5-
6-
7-
Torchchat is currently in a pre-release state and under extensive development.
8-
94

105
# The Lost Manual: torchchat
116

127
[**Introduction**](#introduction) | [**Installation**](#installation) | [**Get Started**](#get-started) | [**Download**](#download) | [**Chat**](#chat) | [**Generate**](#generate) | [**Eval**](#eval) | [**Export**](#export) | [**Supported Systems**](#supported-systems) | [**Contributing**](#contributing) | [**License**](#license)
138

9+
<!--
10+
1411
[shell default]: HF_TOKEN="${SECRET_HF_TOKEN_PERIODIC}" huggingface-cli login
1512
1613
[shell default]: ./install/install_requirements.sh
1714
1815
[shell default]: TORCHCHAT_ROOT=${PWD} ./torchchat/utils/scripts/install_et.sh
1916
17+
-->
2018

2119
This is the advanced users' guide, if you're looking to get started
2220
with LLMs, please refer to the README at the root directory of the
@@ -465,7 +463,7 @@ significant impact on accuracy.
465463

466464
## Native (Stand-Alone) Execution of Exported Models
467465

468-
Refer to the [README](README.md] for an introduction to native
466+
Refer to the [README](README.md) for an introduction to native
469467
execution on servers, desktops, and laptops. Mobile and Edge execution for Android and iOS are
470468
described under [torchchat/edge/docs/Android.md] and [torchchat/edge/docs/iOS.md], respectively.
471469

install/install_requirements.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,10 +62,10 @@ echo "Using pip executable: $PIP_EXECUTABLE"
6262
# NOTE: If a newly-fetched version of the executorch repo changes the value of
6363
# PYTORCH_NIGHTLY_VERSION, you should re-run this script to install the necessary
6464
# package versions.
65-
PYTORCH_NIGHTLY_VERSION=dev20241013
65+
PYTORCH_NIGHTLY_VERSION=dev20241028
6666

6767
# Nightly version for torchvision
68-
VISION_NIGHTLY_VERSION=dev20241013
68+
VISION_NIGHTLY_VERSION=dev20241028
6969

7070
# Nightly version for torchtune
7171
TUNE_NIGHTLY_VERSION=dev20241013

0 commit comments

Comments (0)