@@ -13,11 +13,13 @@
 #include <unordered_map>
 #include <vector>
 
-#include <executorch/examples/models/llama/runner/runner.h>
 #include <executorch/examples/models/llava/runner/llava_runner.h>
-#include <executorch/examples/qualcomm/oss_scripts/llama/runner/runner.h>
 #include <executorch/extension/llm/runner/image.h>
 #include <executorch/extension/llm/runner/irunner.h>
+#include <executorch/extension/llm/runner/llm_runner_helper.h>
+#include <executorch/extension/llm/runner/multimodal_input.h>
+#include <executorch/extension/llm/runner/multimodal_runner.h>
+#include <executorch/extension/llm/runner/text_llm_runner.h>
 #include <executorch/runtime/platform/log.h>
 #include <executorch/runtime/platform/platform.h>
 #include <executorch/runtime/platform/runtime.h>
@@ -174,11 +176,9 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
       std::optional<const std::string> data_path_str = data_path
           ? std::optional<const std::string>{data_path->toStdString()}
           : std::nullopt;
-      // TODO(larryliu0820): Use the API in text_llm_runner.h to create the
-      // runner.
-      runner_ = example::create_llama_runner(
+      runner_ = executorch::extension::llm::create_text_llm_runner(
           model_path->toStdString(),
-          tokenizer_path->toStdString(),
+          llm::load_tokenizer(tokenizer_path->toStdString()),
           data_path_str);
 #if defined(EXECUTORCH_BUILD_QNN)
     } else if (model_type_category == MODEL_TYPE_QNN_LLAMA) {
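For reference, the new creation path can also be exercised outside the JNI layer. Below is a minimal sketch that assumes only the two calls visible in the diff above, llm::load_tokenizer() and executorch::extension::llm::create_text_llm_runner(); the file paths, the namespace alias, the main() wrapper, and the null check are illustrative assumptions, not part of this change.

// Minimal sketch of the new runner-creation path, assuming the calls shown
// in the diff above. Paths and the failure check are hypothetical placeholders.
#include <optional>
#include <string>

#include <executorch/extension/llm/runner/llm_runner_helper.h>
#include <executorch/extension/llm/runner/text_llm_runner.h>

namespace llm = executorch::extension::llm;

int main() {
  const std::string model_path = "/data/local/tmp/llama.pte";          // placeholder
  const std::string tokenizer_path = "/data/local/tmp/tokenizer.bin";  // placeholder
  std::optional<const std::string> data_path = std::nullopt;  // optional extra data file

  // Load the tokenizer first, then hand ownership to the runner factory,
  // mirroring the updated JNI code above.
  auto runner = llm::create_text_llm_runner(
      model_path, llm::load_tokenizer(tokenizer_path), data_path);
  if (runner == nullptr) {
    return 1;  // tokenizer or model could not be loaded (assumed failure signal)
  }
  // Text generation then goes through the IRunner interface (irunner.h).
  return 0;
}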