
Commit 1eb1be3

larryliu0820 authored and facebook-github-bot committed
Remove old tokenizer/ directory in ExecuTorch (#9728)
Summary:
X-link: meta-pytorch/tokenizers#39
Pull Request resolved: #9728

See what happens

Reviewed By: lucylq

Differential Revision: D72007597
1 parent ce74f8e commit 1eb1be3

39 files changed: +20 -130940 lines

.ci/scripts/test_llama_torchao_lowbit.sh

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ cmake --build cmake-out/examples/models/llama -j16 --config Release
 download_stories_model_artifacts
 
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 
 # Export model
 LLAMA_CHECKPOINT=stories110M.pt
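
As a side note, the same conversion can also be driven from Python. This is a minimal sketch, not the tool's documented API: it assumes Llama2cTokenizer keeps the old ExecuTorch wrapper's interface (a constructor taking the SentencePiece tokenizer.model path and an export() helper that writes the llama2c binary format); the file names are placeholders.

# Sketch: produce tokenizer.bin from Python instead of the CLI above.
# Assumes Llama2cTokenizer mirrors the old ExecuTorch tokenizer API
# (constructor taking the .model path, export() writing the binary format).
from pytorch_tokenizers.llama2c import Llama2cTokenizer

tok = Llama2cTokenizer("tokenizer.model")  # placeholder path
tok.export("tokenizer.bin")                # assumed export helper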

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -757,7 +757,7 @@ if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_LLM)
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizer)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizers)
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_MODULE)

examples/models/llama/TARGETS

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ runtime.python_library(
         ":export_library",
         "//executorch/examples/models/llama/tokenizer:tiktoken_py",
         "//executorch/extension/llm/export:export_lib",
-        "//executorch/extension/llm/tokenizer:tokenizer_py_lib",
+        "//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
         "//executorch/extension/pybindings:portable_lib",
     ],
 )

examples/models/llama/eval_llama_lib.py

Lines changed: 4 additions & 4 deletions
@@ -15,13 +15,13 @@
 from executorch.examples.models.llama.export_llama_lib import (
     get_quantizer_and_quant_params,
 )
-from executorch.examples.models.llama.tokenizer.tiktoken import Tokenizer as Tiktoken
+from pytorch_tokenizers.tiktoken import TiktokenTokenizer as Tiktoken
 
 from executorch.extension.llm.export.builder import LLMEdgeManager
-from executorch.extension.llm.tokenizer.tokenizer import (
-    Tokenizer as SentencePieceTokenizer,
+from pytorch_tokenizers.llama2c import (
+    Llama2cTokenizer as SentencePieceTokenizer,
 )
-from executorch.extension.llm.tokenizer.utils import get_tokenizer
+from pytorch_tokenizers import get_tokenizer
 from lm_eval.evaluator import simple_evaluate
 from torch.nn import CrossEntropyLoss
 from tqdm import tqdm
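
The renamed classes are intended as drop-in replacements for the old ExecuTorch wrappers. A minimal usage sketch, assuming both keep the familiar encode(text, bos=..., eos=...) / decode(ids) interface; the model paths are placeholders.

# Sketch of the new import surface shown in this hunk. Assumes the
# encode/decode signatures of the replaced classes are preserved.
from pytorch_tokenizers.llama2c import Llama2cTokenizer
from pytorch_tokenizers.tiktoken import TiktokenTokenizer

sp_tok = Llama2cTokenizer("tokenizer.model")  # SentencePiece-style model (placeholder)
ids = sp_tok.encode("Hello, world", bos=True, eos=False)
print(sp_tok.decode(ids))

tt_tok = TiktokenTokenizer("llama3_tokenizer.model")  # tiktoken-style model (placeholder)
print(tt_tok.decode(tt_tok.encode("Hello, world", bos=True, eos=False)))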

examples/models/llama/evaluate/eager_eval.py

Lines changed: 2 additions & 2 deletions
@@ -9,8 +9,8 @@
 
 import torch
 from executorch.examples.models.llama.tokenizer.tiktoken import Tokenizer as Tiktoken
-from executorch.extension.llm.tokenizer.tokenizer import (
-    Tokenizer as SentencePieceTokenizer,
+from pytorch_tokenizers.llama2c import (
+    Llama2cTokenizer as SentencePieceTokenizer,
 )
 
 from lm_eval.models.huggingface import HFLM as eval_wrapper

examples/models/llama/runner/generation.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 
 import torch
 
-from executorch.extension.llm.tokenizer.utils import get_tokenizer
+from pytorch_tokenizers import get_tokenizer
 
 
 def sample_top_p(probs, p):
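
get_tokenizer now lives at the top level of the pytorch_tokenizers package. A minimal sketch of the runner-style flow, assuming the helper still auto-detects the tokenizer format from the file and returns an object exposing encode/decode, as the old ExecuTorch helper did; the path is a placeholder.

# Sketch: resolve a tokenizer from a model file, as the runner does.
# Assumes get_tokenizer(path) auto-detects the format (tiktoken vs.
# SentencePiece/llama2c) like the helper it replaces.
from pytorch_tokenizers import get_tokenizer

tokenizer = get_tokenizer("tokenizer.model")  # placeholder path
prompt_ids = tokenizer.encode("Once upon a time", bos=True, eos=False)
print(len(prompt_ids), tokenizer.decode(prompt_ids))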

examples/models/llama/tokenizer/targets.bzl

Lines changed: 0 additions & 1 deletion
@@ -16,7 +16,6 @@ def define_common_targets():
         ],
         exported_deps = [
             "//pytorch/tokenizers:tiktoken",
-            "//executorch/extension/llm/tokenizer:tiktoken", # TODO: remove
         ],
         visibility = [
             "@EXECUTORCH_CLIENTS",

examples/models/llama/tokenizer/test/test_tiktoken.cpp

Lines changed: 5 additions & 5 deletions
@@ -10,7 +10,7 @@
 
 #include <vector>
 
-#include <executorch/extension/llm/tokenizer/tiktoken.h>
+#include <pytorch/tokenizers/tiktoken.h>
 
 #include <gtest/gtest.h>
 
@@ -21,9 +21,9 @@
 using namespace ::testing;
 
 using ::example::Version;
-using ::executorch::extension::llm::Tokenizer;
-using ::executorch::runtime::Error;
-using ::executorch::runtime::Result;
+using ::tokenizers::Tokenizer;
+using ::tokenizers::Error;
+using ::tokenizers::Result;
 
 static std::string get_resource_path(const std::string& name) {
 #ifdef EXECUTORCH_FB_BUCK
@@ -36,7 +36,7 @@ static std::string get_resource_path(const std::string& name) {
 class MultimodalTiktokenV5ExtensionTest : public Test {
  public:
   void SetUp() override {
-    tokenizer_ = std::make_unique<executorch::extension::llm::Tiktoken>(
+    tokenizer_ = std::make_unique<tokenizers::Tiktoken>(
         example::get_multimodal_special_tokens(), 0, 1);
     modelPath_ = get_resource_path("test_tiktoken_tokenizer.model");
   }

examples/qualcomm/oss_scripts/llama/CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 # LICENSE file in the root directory of this source tree.
 
 # model sharding with custom op
-set(CUSTOM_OP_SRCS_FILE
+set(CUSTOM_OP_SRCS_FILE
   "${EXECUTORCH_SOURCE_DIR}/extension/llm/custom_ops/op_fallback.cpp"
 )
 add_library(custom_ops ${CUSTOM_OP_SRCS_FILE})
@@ -35,7 +35,7 @@ list(
 list(
   APPEND
   _llama_runner__srcs
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizer/tiktoken.cpp
+  ${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizers/src/tiktoken.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/../../../models/llama/tokenizer/llama_tiktoken.cpp
 )
 

extension/llm/export/TARGETS

Lines changed: 1 addition & 1 deletion
@@ -40,6 +40,6 @@ runtime.python_library(
         "//executorch/exir:lib",
         "//executorch/exir/backend:backend_details",
         "//executorch/extension/export_util:export_util",
-        "//executorch/extension/llm/tokenizer:tokenizer_py_lib",
+        "//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
     ],
 )
