7 changes: 7 additions & 0 deletions examples/qualcomm/oss_scripts/llama/runner/runner.cpp
@@ -464,6 +464,13 @@ Error Runner<T>::generate_from_prompt_or_file(
return Error::Ok;
}

template <typename T>
::executorch::runtime::Error Runner<T>::prefill(
const std::string& prompt,
const executorch::extension::llm::GenerationConfig& config) {
return Error::NotImplemented;
}

template <typename T>
Result<DecoderModelVersion> Runner<T>::get_decoder_model_version() {
if (!is_loaded()) {
4 changes: 4 additions & 0 deletions examples/qualcomm/oss_scripts/llama/runner/runner.h
@@ -79,6 +79,10 @@ class Runner : public executorch::extension::llm::IRunner {
const executorch::extension::llm::GenerationConfig& config,
std::function<void(const std::string&)> token_callback = {},
std::function<void(const executorch::llm::Stats&)> stats_callback = {});

executorch::runtime::Error prefill(
const std::string& prompt,
const executorch::extension::llm::GenerationConfig& config = {}) override;
void stop() override {};
void reset() override {};
executorch::runtime::Result<DecoderModelVersion> get_decoder_model_version();
61 changes: 49 additions & 12 deletions extension/android/jni/jni_layer_llama.cpp
@@ -123,7 +123,6 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
std::unique_ptr<llm::IRunner> runner_;
std::unique_ptr<executorch::extension::llm::MultimodalRunner>
multi_modal_runner_;
- std::vector<llm::MultimodalInput> prefill_inputs_;

public:
constexpr static auto kJavaDescriptor =
@@ -213,8 +212,7 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
facebook::jni::alias_ref<ExecuTorchLlmCallbackJni> callback,
jboolean echo) {
if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
- std::vector<llm::MultimodalInput> inputs = prefill_inputs_;
- prefill_inputs_.clear();
std::vector<llm::MultimodalInput> inputs;
if (!prompt->toStdString().empty()) {
inputs.emplace_back(llm::MultimodalInput{prompt->toStdString()});
}
@@ -245,17 +243,28 @@

// Returns status_code
// Contract is valid within an AAR (JNI + corresponding Java code)
- jint append_text_input(facebook::jni::alias_ref<jstring> prompt) {
- prefill_inputs_.emplace_back(llm::MultimodalInput{prompt->toStdString()});
- return 0;
jint prefill_text_input(facebook::jni::alias_ref<jstring> prompt) {
if (model_type_category_ == MODEL_TYPE_CATEGORY_LLM) {
runner_->prefill(prompt->toStdString(), {});
return 0;
} else if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
multi_modal_runner_->prefill(
{llm::MultimodalInput{prompt->toStdString()}});
return 0;
}
}
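As written, `prefill_text_input` discards the `Error` each runner returns and has no `return` on the path where the model category matches neither branch, which is undefined behavior for a non-void function. A hedged sketch of a stricter variant, assuming both `prefill` overloads return `::executorch::runtime::Error` as `IRunner::prefill` below does (the final fallback is my addition, not part of the PR):

```cpp
// Sketch only: surface the runner's Error as the JNI status code and
// close the "unknown model category" hole with an explicit return.
jint prefill_text_input(facebook::jni::alias_ref<jstring> prompt) {
  if (model_type_category_ == MODEL_TYPE_CATEGORY_LLM) {
    return static_cast<jint>(runner_->prefill(prompt->toStdString(), {}));
  } else if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
    return static_cast<jint>(multi_modal_runner_->prefill(
        {llm::MultimodalInput{prompt->toStdString()}}));
  }
  return static_cast<jint>(Error::InvalidArgument); // neither LLM nor multimodal
}
```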

// Returns status_code
- jint append_images_input(
jint prefill_images_input(
facebook::jni::alias_ref<jintArray> image,
jint width,
jint height,
jint channels) {
if (model_type_category_ != MODEL_TYPE_CATEGORY_MULTIMODAL) {
return static_cast<jint>(Error::InvalidArgument);
}
if (image == nullptr) {
return static_cast<jint>(Error::InvalidArgument);
}
std::vector<llm::Image> images;
- if (image == nullptr) {
- return static_cast<jint>(Error::EndOfMethod);
@@ -269,13 +278,39 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
image_data[i] = image_data_jint[i];
}
llm::Image image_runner{std::move(image_data), width, height, channels};
- prefill_inputs_.emplace_back(
- llm::MultimodalInput{std::move(image_runner)});
multi_modal_runner_->prefill(
{llm::MultimodalInput{std::move(image_runner)}});
}

return 0;
}

jint prefill_audio_input(
facebook::jni::alias_ref<jintArray> audio,
jint batch_size,
jint n_channels,
jint n_samples) {
if (model_type_category_ != MODEL_TYPE_CATEGORY_MULTIMODAL) {
return static_cast<jint>(Error::InvalidArgument);
}
if (audio == nullptr) {
return static_cast<jint>(Error::InvalidArgument);
}
auto audio_size = audio->size();
std::vector<uint8_t> audio_data(audio_size);
if (audio_size != 0) {
std::vector<jint> audio_data_jint(audio_size);
audio->getRegion(0, audio_size, audio_data_jint.data());
for (int i = 0; i < audio_size; i++) {
audio_data[i] = audio_data_jint[i];
}
llm::RawAudio audio_input{audio_data, batch_size, n_channels, n_samples};
multi_modal_runner_->prefill(
{llm::MultimodalInput{std::move(audio_input)}});
}
return 0;
}
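The image and audio paths repeat the same jintArray-to-byte conversion: JNI marshals each element as a 32-bit `jint`, which is then narrowed back to `uint8_t`. A small helper could factor this out; a sketch using the same calls seen above (`to_bytes` is a hypothetical name, not in the PR):

```cpp
// Hypothetical helper (not in the PR): copy a JNI int array into a byte
// vector, narrowing each jint to uint8_t exactly as the loops above do.
static std::vector<uint8_t> to_bytes(facebook::jni::alias_ref<jintArray> arr) {
  const auto n = arr->size();
  std::vector<jint> widened(n);
  arr->getRegion(0, n, widened.data());
  return std::vector<uint8_t>(widened.begin(), widened.end());
}
```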

void stop() {
if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
multi_modal_runner_->stop();
@@ -309,9 +344,11 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
makeNativeMethod("stop", ExecuTorchLlmJni::stop),
makeNativeMethod("load", ExecuTorchLlmJni::load),
makeNativeMethod(
"appendImagesInput", ExecuTorchLlmJni::append_images_input),
"appendImagesInput", ExecuTorchLlmJni::prefill_images_input),
makeNativeMethod(
"appendTextInput", ExecuTorchLlmJni::prefill_text_input),
makeNativeMethod(
"appendTextInput", ExecuTorchLlmJni::append_text_input),
"appendAudioInput", ExecuTorchLlmJni::prefill_audio_input),
makeNativeMethod("resetContext", ExecuTorchLlmJni::reset_context),
});
}
11 changes: 11 additions & 0 deletions extension/llm/runner/irunner.h
@@ -125,6 +125,17 @@ class ET_EXPERIMENTAL IRunner {
std::function<void(const std::string&)> token_callback,
std::function<void(const Stats&)> stats_callback) = 0;

/**
* Prefill text inputs, for example to reload chat history.
* @param prompt Text prompt to prefill.
* @param config Generation config; if num_bos or num_eos is non-zero, that
* many BOS/EOS tokens are added when the prompt is encoded.
* @return The error code. KV cache position is tracked internally in pos_.
*/
virtual ::executorch::runtime::Error prefill(
const std::string& prompt,
const GenerationConfig& config = {}) = 0;

/**
* Stop the generation process.
*/
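Per the doc comment, the intended use of `prefill` is replaying stored conversation turns into the KV cache before generating. A minimal caller sketch, assuming a loaded `IRunner` implementation; the `history` vector, function name, and empty callbacks are illustrative, not from the PR:

```cpp
#include <string>
#include <vector>

#include <executorch/extension/llm/runner/irunner.h>

using executorch::extension::llm::GenerationConfig;
using executorch::extension::llm::IRunner;
using executorch::runtime::Error;

// Replay prior chat turns through prefill() (only the KV-cache position
// advances; no tokens are sampled), then generate from the newest prompt.
Error restore_and_continue(
    IRunner& runner,
    const std::vector<std::string>& history,
    const std::string& latest_prompt) {
  for (const auto& turn : history) {
    Error err = runner.prefill(turn, GenerationConfig{});
    if (err != Error::Ok) {
      return err;
    }
  }
  return runner.generate(latest_prompt, GenerationConfig{}, {}, {});
}
```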
22 changes: 22 additions & 0 deletions extension/llm/runner/text_llm_runner.cpp
@@ -217,6 +217,28 @@ Error TextLLMRunner::generate(
return Error::Ok;
}

Error TextLLMRunner::prefill(
const std::string& prompt,
const GenerationConfig& config) {
if (!is_loaded()) {
ET_CHECK_OK_OR_RETURN_ERROR(load());
}

::tokenizers::Result<std::vector<uint64_t>> encode_res = tokenizer_->encode(
prompt,
/*bos=*/config.num_bos,
/*eos=*/config.num_eos);

ET_CHECK_TK_OK_OR_RETURN_ERROR(
encode_res.error(), "Failed to encode prompt %s", prompt.c_str());

// Feed the encoded token sequence to the prefiller; pos_ advances internally.
std::vector<uint64_t> prompt_tokens = encode_res.get();
auto prefill_res = text_prefiller_->prefill(prompt_tokens, pos_);
ET_CHECK_OK_OR_RETURN_ERROR(prefill_res.error());
return Error::Ok;
}

Error TextLLMRunner::warmup(const std::string& prompt, int32_t max_new_tokens) {
// Create a GenerationConfig for warmup
GenerationConfig config{
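Note that `prefill` forwards `config.num_bos` and `config.num_eos` directly into `tokenizer_->encode`, so BOS/EOS handling is controlled per call. A hedged usage sketch (the two field names are taken from the code above; all other `GenerationConfig` fields are assumed to default sensibly):

```cpp
#include <string>

#include <executorch/extension/llm/runner/text_llm_runner.h>

using executorch::extension::llm::GenerationConfig;
using executorch::extension::llm::TextLLMRunner;
using executorch::runtime::Error;

// Sketch: prefill one stored chat turn with explicit BOS/EOS control.
Error prefill_turn(TextLLMRunner& runner, const std::string& turn) {
  GenerationConfig cfg;
  cfg.num_bos = 1; // prepend one BOS token when encoding this chunk
  cfg.num_eos = 0; // no EOS, so a later generate() can continue the turn
  return runner.prefill(turn, cfg); // only the KV-cache position advances
}
```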
11 changes: 11 additions & 0 deletions extension/llm/runner/text_llm_runner.h
@@ -101,6 +101,17 @@ class ET_EXPERIMENTAL TextLLMRunner : public IRunner {
std::function<void(const std::string&)> token_callback = {},
std::function<void(const Stats&)> stats_callback = {}) override;

/**
* Prefill text inputs, for example to reload chat history.
* @param prompt Text prompt to prefill.
* @param config Generation config; if num_bos or num_eos is non-zero, that
* many BOS/EOS tokens are added when the prompt is encoded.
* @return The error code. KV cache position is tracked internally in pos_.
*/
::executorch::runtime::Error prefill(
const std::string& prompt,
const GenerationConfig& config = {}) override;

/**
* @brief Warms up the model with a sample prompt
*