Commit 7418d2e
Phi3 runner uses TextLLMRunner
As titled, this PR switches the phi-3-mini runner over to `TextLLMRunner`. The eager model comes from Hugging Face; it does not use the KV-cache custom op, since that is currently only supported in the Optimum-executorch repo, so performance may not be the best.
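For context, a minimal sketch of the export-side setup this change relies on, using only names that appear in the `export_phi-3-mini.py` diff below. The checkpoint id is an assumption (the script derives the real one from its context-length flag), and running this downloads weights:

```python
# Sketch: eager Hugging Face Phi-3-mini with a static KV cache, wrapped so
# torch.export sees a single module. The checkpoint id is an assumption.
import torch
from transformers import Phi3ForCausalLM
from transformers.cache_utils import StaticCacheConfig
from transformers.integrations.executorch import (
    TorchExportableModuleForDecoderOnlyLM,
)

model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
model.generation_config.cache_implementation = "static"
model.generation_config.cache_config = StaticCacheConfig(
    batch_size=1, max_cache_len=model.config.max_position_embeddings
)

# The wrapper owns the cache as module state, which is why no KV-cache
# custom op is needed on this path.
exportable = TorchExportableModuleForDecoderOnlyLM(
    model,
    max_batch_size=1,
    max_cache_len=model.config.max_position_embeddings,
)
```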
1 parent 042eb1a commit 7418d2e

13 files changed: +175 −266 lines

.ci/scripts/test_phi_3_mini.sh

Lines changed: 4 additions & 21 deletions
@@ -22,31 +22,14 @@ NPROC=8
 if hash nproc &> /dev/null; then NPROC=$(nproc); fi
 
 cmake_install_executorch_libraries() {
-  cmake -DPYTHON_EXECUTABLE=python \
-    -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
-    -DEXECUTORCH_ENABLE_LOGGING=1 \
-    -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
-    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
-    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-    -DEXECUTORCH_BUILD_KERNELS_LLM=ON \
-    -B${BUILD_DIR} .
-
-  cmake --build ${BUILD_DIR} -j${NPROC} --target install --config ${BUILD_TYPE}
+  cmake --preset llm -DCMAKE_INSTALL_PREFIX=cmake-out -DCMAKE_BUILD_TYPE=${BUILD_TYPE}
+
+  cmake --build cmake-out -j16 --target install --config ${BUILD_TYPE}
 }
 
 cmake_build_phi_3_mini() {
-  cmake -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
-    -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
+  cmake -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
     -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-    -DEXECUTORCH_BUILD_KERNELS_LLM=ON \
-    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
-    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
     -B${BUILD_DIR}/${MODEL_DIR} \
     ${MODEL_DIR}

examples/models/phi-3-mini/CMakeLists.txt

Lines changed: 11 additions & 21 deletions
@@ -20,17 +20,14 @@ set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_STANDARD_REQUIRED True)
 set(CMAKE_BUILD_TYPE Release)
 
-# Set options for executorch build.
-option(EXECUTORCH_BUILD_EXTENSION_MODULE "" ON)
-option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "" ON)
-option(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR "" ON)
-option(EXECUTORCH_BUILD_EXTENSION_TENSOR "" ON)
-option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "" ON)
-option(EXECUTORCH_BUILD_XNNPACK "" ON)
-
-add_subdirectory(
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../.. ${CMAKE_BINARY_DIR}/../../..
-)
+set(EXECUTORCH_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/../../..")
+find_package(executorch CONFIG REQUIRED)
+
+target_link_options_shared_lib(executorch)
+
+set(BUILD_TESTING OFF)
+add_subdirectory(${EXECUTORCH_ROOT}/extension/llm/runner ${CMAKE_BINARY_DIR}/../../../extension/llm/runner)
+
 if(NOT TARGET gflags)
   add_subdirectory(
     ${CMAKE_CURRENT_SOURCE_DIR}/../../../third-party/gflags
@@ -40,16 +37,9 @@ endif()
 
 add_executable(
   phi_3_mini_runner
-  main.cpp runner.cpp
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../extension/llm/sampler/sampler.cpp
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../extension/llm/tokenizers/src/llama2c_tokenizer.cpp
-)
-target_include_directories(
-  phi_3_mini_runner
-  PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../../third-party/gflags/src
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../extension/llm/tokenizers/include
+  main.cpp
 )
+
 target_link_libraries(
-  phi_3_mini_runner PRIVATE executorch extension_module_static extension_tensor
-  optimized_native_cpu_ops_lib xnnpack_backend gflags
+  phi_3_mini_runner PUBLIC executorch optimized_native_cpu_ops_lib xnnpack_backend gflags extension_llm_runner
 )

examples/models/phi-3-mini/README.md

Lines changed: 8 additions & 24 deletions
@@ -21,33 +21,17 @@ python -m examples.models.phi-3-mini.export_phi-3-mini -c "4k" -s 128 -o phi-3-m
 ```
 3. Build and run the model.
 - Build executorch with optimized CPU performance as follows. Build options available [here](https://github.com/pytorch/executorch/blob/main/CMakeLists.txt#L59).
-```
-cmake -DPYTHON_EXECUTABLE=python \
-    -DCMAKE_INSTALL_PREFIX=cmake-out \
-    -DEXECUTORCH_ENABLE_LOGGING=1 \
-    -DCMAKE_BUILD_TYPE=Release \
-    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
-    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
-    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-    -DEXECUTORCH_BUILD_KERNELS_LLM=ON \
-    -Bcmake-out .
+```
+cmake --preset llm -DCMAKE_INSTALL_PREFIX=cmake-out
 
-cmake --build cmake-out -j16 --target install --config Release
-```
+cmake --build cmake-out -j16 --target install --config Release
+```
 - Build Phi-3-mini runner.
 ```
-cmake -DPYTHON_EXECUTABLE=python \
-    -DCMAKE_INSTALL_PREFIX=cmake-out \
-    -DCMAKE_BUILD_TYPE=Release \
-    -DEXECUTORCH_BUILD_KERNELS_LLM=ON \
-    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
-    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
-    -Bcmake-out/examples/models/phi-3-mini \
-    examples/models/phi-3-mini
+cmake -DCMAKE_INSTALL_PREFIX=cmake-out \
+    -DCMAKE_BUILD_TYPE=Release \
+    -Bcmake-out/examples/models/phi-3-mini \
+    examples/models/phi-3-mini
 
 cmake --build cmake-out/examples/models/phi-3-mini -j16 --config Release
 ```

examples/models/phi-3-mini/export_phi-3-mini.py

Lines changed: 81 additions & 33 deletions
@@ -19,13 +19,42 @@
     XNNPACKQuantizer,
 )
 from executorch.backends.xnnpack.utils.configs import get_xnnpack_edge_compile_config
-from executorch.exir import to_edge
+from executorch.exir import to_edge_transform_and_lower
+from executorch.exir.capture._config import ExecutorchBackendConfig
+from executorch.exir.passes import MemoryPlanningPass
+from executorch.exir.passes.sym_shape_eval_pass import ConstraintBasedSymShapeEvalPass
 from torch.export import export_for_training
+from torch.nn.attention import SDPBackend
 from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 from transformers import Phi3ForCausalLM
+from transformers.cache_utils import StaticCacheConfig
 
-from .phi_3_mini import Phi3Mini
+from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
+
+
+def _prepare_export_inputs(max_seq_len: int, sliding_window: int):
+    """
+    Prepare example inputs and configurations for export.
+
+    Returns:
+        example_input_ids (torch.Tensor): Example input IDs tensor.
+        example_cache_position (torch.Tensor): Example cache position tensor.
+        dynamic_shapes (dict or None): Dynamic shape specifications for export.
+        strict (bool): Whether to use strict export mode.
+    """
+    # Prepare inputs with dynamic shapes
+    seq_length = 3  # Sequence length > 1 to avoid specialization issues
+    example_input_ids = torch.zeros((1, seq_length), dtype=torch.long)
+    example_cache_position = torch.arange(seq_length, dtype=torch.long)
+    max_dim = min(max_seq_len, sliding_window) - 1
+    seq_len_dim = torch.export.Dim("seq_length_dim", max=max_dim)
+    dynamic_shapes = {
+        "input_ids": {1: seq_len_dim},
+        "cache_position": {0: seq_len_dim},
+    }
+
+    return example_input_ids, example_cache_position, dynamic_shapes
 
 
 def export(args) -> None:
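The `_prepare_export_inputs` helper added above encodes a reusable pattern: trace with a sequence length greater than 1 so `torch.export` does not specialize that dimension, then mark it dynamic with an upper bound. A self-contained toy under stated assumptions (the stub module is hypothetical, not the Phi-3 graph):

```python
import torch


class TinyDecoderStub(torch.nn.Module):
    # Hypothetical stand-in: embeds tokens and mixes in cache positions.
    def __init__(self):
        super().__init__()
        self.embed = torch.nn.Embedding(32, 8)

    def forward(self, input_ids, cache_position):
        return self.embed(input_ids) + cache_position.to(torch.float).view(1, -1, 1)


seq_length = 3  # > 1, mirroring the comment in the helper above
input_ids = torch.zeros((1, seq_length), dtype=torch.long)
cache_position = torch.arange(seq_length, dtype=torch.long)
seq_len_dim = torch.export.Dim("seq_length_dim", max=127)
dynamic_shapes = {
    "input_ids": {1: seq_len_dim},
    "cache_position": {0: seq_len_dim},
}

ep = torch.export.export(
    TinyDecoderStub(),
    (input_ids, cache_position),
    dynamic_shapes=dynamic_shapes,
    strict=False,
)
# The exported graph now accepts any sequence length up to 127.
```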
@@ -40,51 +69,70 @@ def export(args) -> None:
         f"Invalid context length {args.context_length}. Should be either 4k or 128k"
     )
 
-    with torch.no_grad():
-        model = Phi3Mini(
-            # pyre-ignore: Undefined attribute [16]: Module `transformers` has no attribute `Phi3ForCausalLM`
-            model=Phi3ForCausalLM.from_pretrained(model_name),
+    with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
+        model = Phi3ForCausalLM.from_pretrained(model_name)
+        model.generation_config.cache_implementation = "static"
+        model.generation_config.cache_config = StaticCacheConfig(
+            batch_size=1, max_cache_len=model.config.max_position_embeddings
+        )
+
+        exportable_module = TorchExportableModuleForDecoderOnlyLM(
+            model,
             max_batch_size=1,
-            max_seq_len=args.seq_len,
+            max_cache_len=model.config.max_position_embeddings,
         )
-        example_inputs = (
-            torch.tensor(
-                [[1048, 263, 931, 746]], dtype=torch.long, requires_grad=False
-            ),
+        input_ids, cache_position, dynamic_shapes = _prepare_export_inputs(
+            model.config.max_position_embeddings, model.config.sliding_window
+        )
+        example_inputs = (input_ids, cache_position)
+        exported_program = exportable_module.export(
+            input_ids, cache_position, dynamic_shapes, strict=False
+        )
+        # Apply RemoveTransposes pass to remove
+        # any back-to-back transpose ops that are not needed
+        # e.g. output of update_cache is transposed and
+        # input to custom_sdpa is transposed.
+        from executorch.extension.llm.export.export_passes import (
+            RemoveRedundantTransposes,
         )
-        dynamic_shapes = {
-            "input_ids": {
-                1: torch.export.Dim("sequence_length", min=1, max=args.seq_len)
-            }
-        }
+
+        mutated_gm = RemoveRedundantTransposes()(exported_program.module())[0]
 
         xnnpack_quant_config = get_symmetric_quantization_config(
             is_per_channel=True, is_dynamic=True
         )
         xnnpack_quantizer = XNNPACKQuantizer()
         xnnpack_quantizer.set_global(xnnpack_quant_config)
 
-        model = export_for_training(
-            model, example_inputs, dynamic_shapes=dynamic_shapes, strict=True
-        ).module()
-        model = prepare_pt2e(model, xnnpack_quantizer)  # pyre-fixme[6]
-        model(*example_inputs)
-        model = convert_pt2e(model)
-        DuplicateDynamicQuantChainPass()(model)
-        # TODO(lunwenh): update it to use export once
-        # https://github.com/pytorch/pytorch/issues/128394 is resolved.
-        model = torch.export._trace._export(
-            model,
-            example_inputs,
-            dynamic_shapes=dynamic_shapes,
-            strict=False,
-            pre_dispatch=False,
+        gm = prepare_pt2e(mutated_gm, xnnpack_quantizer)  # pyre-fixme[6]
+        gm(*example_inputs)
+        gm = convert_pt2e(gm)
+        DuplicateDynamicQuantChainPass()(gm)
+        exported_program = export_for_training(
+            gm, example_inputs, dynamic_shapes=dynamic_shapes, strict=False
        )
 
         edge_config = get_xnnpack_edge_compile_config()
-        edge_manager = to_edge(model, compile_config=edge_config)
+        edge_manager = to_edge_transform_and_lower(
+            exported_program,
+            partitioner=[XnnpackPartitioner()],
+            compile_config=edge_config,
+            constant_methods={
+                "get_eos_ids": [32000],
+                "use_kv_cache": True,
+                "enable_dynamic_shape": True,
+                "get_max_seq_len": model.config.max_position_embeddings - 1,
+            },
+        )
         edge_manager = edge_manager.to_backend(XnnpackPartitioner())
-        et_program = edge_manager.to_executorch()
+        et_program = edge_manager.to_executorch(
+            ExecutorchBackendConfig(
+                extract_delegate_segments=True,
+                do_quant_fusion_and_const_prop=True,
+                memory_planning_pass=MemoryPlanningPass(alloc_graph_input=False),
+                sym_shape_eval_pass=ConstraintBasedSymShapeEvalPass(),
+            )
+        )
 
     with open(args.output_name, "wb") as file:
         file.write(et_program.buffer)
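The prepare/convert sequence in the hunk above is the standard PT2E quantization round trip. A self-contained toy version, with the caveat that the quantizer import path is an assumption (the hunk's context truncates the real import block) and the tiny module is hypothetical:

```python
import torch
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e

# Import path assumed; the diff context above truncates the real import.
from executorch.backends.xnnpack.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)


class TinyLinear(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.fc(x)


example = (torch.randn(1, 8),)
gm = torch.export.export_for_training(TinyLinear(), example).module()

quantizer = XNNPACKQuantizer()
quantizer.set_global(
    get_symmetric_quantization_config(is_per_channel=True, is_dynamic=True)
)

gm = prepare_pt2e(gm, quantizer)  # insert observers
gm(*example)                      # one calibration pass
gm = convert_pt2e(gm)             # rewrite to quantized ops
```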

examples/models/phi-3-mini/main.cpp

Lines changed: 15 additions & 4 deletions
@@ -6,9 +6,12 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <executorch/extension/llm/runner/text_llm_runner.h>
 #include <gflags/gflags.h>
+#include <pytorch/tokenizers/llama2c_tokenizer.h>
+#include <iostream>
 
-#include <executorch/examples/models/phi-3-mini/runner.h>
+using executorch::extension::llm::TextLLMRunner;
 
 DEFINE_string(
     model_path,
@@ -42,9 +45,17 @@ int main(int32_t argc, char** argv) {
 
   int32_t seq_len = FLAGS_seq_len;
 
-  example::Runner runner(model_path, tokenizer_path, temperature);
-
-  runner.generate(prompt, seq_len);
+  std::unique_ptr<tokenizers::Tokenizer> tokenizer =
+      std::make_unique<tokenizers::Llama2cTokenizer>();
+  tokenizer->load(tokenizer_path);
+  std::cout << "Tokenizer loaded, eos_id = " << tokenizer->eos_tok()
+            << std::endl;
+  auto runner = executorch::extension::llm::create_text_llm_runner(
+      model_path, std::move(tokenizer));
+
+  runner->generate(
+      prompt,
+      {.seq_len = seq_len, .temperature = static_cast<float>(temperature)});
 
   return 0;
 }

examples/models/phi-3-mini/phi_3_mini.py

Lines changed: 3 additions & 1 deletion
@@ -30,11 +30,13 @@ def __init__(self, model: Phi3ForCausalLM, max_batch_size: int, max_seq_len: int
     def forward(
         self,
         # pyre-fixme[9]: input_ids has type `LongTensor`; used as `None`.
-        input_ids: torch.LongTensor = None,
+        input_ids: torch.LongTensor,
+        cache_positions: torch.Tensor,
     ) -> torch.FloatTensor:
         # pyre-fixme[16]: `Phi3ForCausalLM` has no attribute `forward`.
         return self.model.forward(
             input_ids=input_ids,
+            cache_positions=cache_positions,
             use_cache=True,
             return_dict=True,
             past_key_values=self.cache,
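To illustrate the calling convention the new `cache_positions` argument implies — prefill writes one static-cache slot per prompt token, then each decode step writes a single slot — here is a hedged, self-contained sketch. The stub below is hypothetical; the real class wraps `Phi3ForCausalLM` and its cache, and the prompt ids are taken from the old example inputs in `export_phi-3-mini.py`:

```python
import torch


class StubDecoder(torch.nn.Module):
    # Hypothetical stand-in for the wrapped model: writes embeddings into a
    # static cache at the slots named by cache_positions.
    def __init__(self, vocab=32064, dim=8, max_seq_len=16):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab, dim)
        self.register_buffer("k_cache", torch.zeros(1, max_seq_len, dim))

    def forward(self, input_ids, cache_positions):
        h = self.embed(input_ids)
        self.k_cache[:, cache_positions] = h  # static-cache update
        return self.k_cache.sum(dim=1)


m = StubDecoder()
prompt = torch.tensor([[1048, 263, 931, 746]], dtype=torch.long)
m(prompt, torch.arange(4))                  # prefill: slots 0..3
m(torch.tensor([[42]]), torch.tensor([4]))  # decode one token at slot 4
```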
