Skip to content

Commit 0f6385e

Browse files
authored
Run Llava with MultimodalRunner (#14250)
### Summary

Run the Llava model with MultimodalRunner instead of LlavaRunner.
1 parent 3584da9 commit 0f6385e

File tree

3 files changed

+59
-9
lines changed

3 files changed

+59
-9
lines changed

.ci/scripts/test_llava.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ run_and_verify() {
149149

150150
# verify result.txt
151151
RESULT=$(cat result.txt)
152-
EXPECTED_PREFIX="ASSISTANT: image captures a basketball game in progress, with"
152+
EXPECTED_PREFIX="ASSISTANT: The image captures a basketball game in progress, with"
153153

154154
if [[ "${RESULT}" == *"${EXPECTED_PREFIX}"* ]]; then
155155
echo "Expected result prefix: ${EXPECTED_PREFIX}"

examples/models/llava/main.cpp

Lines changed: 53 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,11 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/examples/models/llava/runner/llava_runner.h>
9+
#include <executorch/extension/llm/runner/image.h>
10+
#include <executorch/extension/llm/runner/multimodal_input.h>
11+
#include <executorch/extension/llm/runner/multimodal_runner.h>
1012
#include <gflags/gflags.h>
13+
#include <pytorch/tokenizers/llama2c_tokenizer.h>
1114
#define STB_IMAGE_IMPLEMENTATION
1215
#include <stb_image.h>
1316
#define STB_IMAGE_RESIZE_IMPLEMENTATION
@@ -44,7 +47,10 @@ DEFINE_int32(
4447
-1,
4548
"Number of CPU threads for inference. Defaults to -1, which implies we'll use a heuristic to derive the # of performant cores for a specific device.");
4649

47-
using executorch::extension::llm::Image;
50+
using ::executorch::extension::llm::Image;
51+
using ::executorch::extension::llm::make_image_input;
52+
using ::executorch::extension::llm::make_text_input;
53+
using ::executorch::extension::llm::MultimodalInput;
4854

4955
void load_image(const std::string& image_path, Image& image) {
5056
int width, height, channels;
@@ -127,14 +133,54 @@ int32_t main(int32_t argc, char** argv) {
127133
->_unsafe_reset_threadpool(num_performant_cores);
128134
}
129135
#endif
130-
// create llama runner
131-
example::LlavaRunner runner(model_path, tokenizer_path, temperature);
136+
// Load tokenizer
137+
std::unique_ptr<::tokenizers::Tokenizer> tokenizer =
138+
std::make_unique<tokenizers::Llama2cTokenizer>();
139+
tokenizer->load(tokenizer_path);
140+
if (tokenizer == nullptr) {
141+
ET_LOG(Error, "Failed to load tokenizer from: %s", tokenizer_path);
142+
return 1;
143+
}
144+
145+
// Create multimodal runner
146+
std::unique_ptr<::executorch::extension::llm::MultimodalRunner> runner =
147+
::executorch::extension::llm::create_multimodal_runner(
148+
model_path, std::move(tokenizer));
149+
if (runner == nullptr) {
150+
ET_LOG(Error, "Failed to create multimodal runner");
151+
return 1;
152+
}
132153

154+
// Load runner
155+
auto load_error = runner->load();
156+
if (load_error != ::executorch::runtime::Error::Ok) {
157+
ET_LOG(Error, "Failed to load multimodal runner");
158+
return 1;
159+
}
160+
161+
// Prepare inputs
162+
static const char* kPresetPrompt =
163+
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: ";
133164
Image image;
134165
load_image(image_path, image);
135-
std::vector<Image> images = {image};
166+
std::vector<MultimodalInput> inputs = {
167+
make_text_input(std::string(kPresetPrompt)),
168+
make_image_input(image),
169+
make_text_input(std::string(prompt)),
170+
};
171+
172+
::executorch::extension::llm::GenerationConfig config;
173+
config.temperature = temperature;
174+
config.echo = true;
175+
176+
// Generate
177+
ET_LOG(Info, "Starting generation...");
178+
auto error = runner->generate(inputs, config);
179+
if (error != ::executorch::runtime::Error::Ok) {
180+
ET_LOG(Error, "Failed to generate with multimodal runner");
181+
return 1;
182+
}
136183

137-
// generate
138-
runner.generate(std::move(images), prompt, seq_len);
184+
printf("\n");
139185
return 0;
140186
}

extension/llm/runner/multimodal_runner.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,11 @@ Error MultimodalRunner::generate(
104104

105105
uint64_t prefill_next_token = 0;
106106
// Process multimodal inputs in order
107-
for (const MultimodalInput& input : inputs) {
107+
for (size_t i = 0; i < inputs.size(); ++i) {
108+
const MultimodalInput& input = inputs[i];
109+
if (config.echo && i == inputs.size() - 1 && input.is_text()) {
110+
wrapped_callback(input.get_text());
111+
}
108112
prefill_next_token = ET_UNWRAP(multimodal_prefiller_->prefill(input, pos_));
109113
}
110114

0 commit comments

Comments (0)