Commit 9b6387f

No longer need generateFromPos API (#14101)
Just use generate()
1 parent: 517d7b5 · commit: 9b6387f

2 files changed: 4 additions, 35 deletions

extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm/LlmModule.java

Lines changed: 4 additions & 2 deletions

@@ -231,8 +231,10 @@ public long prefillPrompt(String prompt, long startPos, int bos, int eos) {
    * @return The error code.
    */
   @Deprecated
-  public native int generateFromPos(
-      String prompt, int seqLen, long startPos, LlmCallback callback, boolean echo);
+  public int generateFromPos(
+      String prompt, int seqLen, long startPos, LlmCallback callback, boolean echo) {
+    return generate(prompt, seqLen, callback, echo);
+  }
 
   /**
    * Reset the context of the LLM. This will clear the KV cache and reset the state of the LLM.
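
With this change, callers can drop the startPos argument and call generate() directly; the deprecated generateFromPos overload keeps old call sites compiling but simply forwards to generate(), ignoring startPos. A minimal migration sketch follows, assuming an existing LlmModule instance and an LlmCallback implementation (the helper class and variable names are illustrative, not part of this commit; package paths are taken from the file path in the diff):

import org.pytorch.executorch.extension.llm.LlmCallback;
import org.pytorch.executorch.extension.llm.LlmModule;

public class GenerateMigrationSketch {
  // Illustrative helper: shows only the call-site change introduced by this commit.
  static int run(LlmModule module, String prompt, int seqLen, LlmCallback callback) {
    // Before: module.generateFromPos(prompt, seqLen, /*startPos*/ 0L, callback, /*echo*/ true);
    // After: the deprecated wrapper just forwards to generate(), so call it directly.
    return module.generate(prompt, seqLen, callback, /*echo*/ true);
  }
}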

extension/android/jni/jni_layer_llama.cpp

Lines changed: 0 additions & 33 deletions

@@ -290,37 +290,6 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
     return 0;
   }
 
-  jint generate_from_pos(
-      facebook::jni::alias_ref<jstring> prompt,
-      jint seq_len,
-      jlong start_pos,
-      facebook::jni::alias_ref<ExecuTorchLlmCallbackJni> callback,
-      jboolean echo) {
-    if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
-      std::vector<llm::MultimodalInput> inputs = prefill_inputs_;
-      prefill_inputs_.clear();
-      inputs.emplace_back(llm::MultimodalInput{prompt->toStdString()});
-      return static_cast<jint>(multi_modal_runner_->generate(
-          inputs,
-          llm::GenerationConfig{
-              .echo = static_cast<bool>(echo), .seq_len = seq_len},
-          [callback](const std::string& result) { callback->onResult(result); },
-          [callback](const llm::Stats& stats) { callback->onStats(stats); }));
-    } else if (model_type_category_ == MODEL_TYPE_CATEGORY_LLM) {
-      executorch::extension::llm::GenerationConfig config{
-          .echo = static_cast<bool>(echo),
-          .seq_len = seq_len,
-          .temperature = temperature_,
-      };
-      return static_cast<jint>(runner_->generate(
-          prompt->toStdString(),
-          config,
-          [callback](std::string result) { callback->onResult(result); },
-          [callback](const llm::Stats& stats) { callback->onStats(stats); }));
-    }
-    return static_cast<jint>(executorch::runtime::Error::InvalidArgument);
-  }
-
   void stop() {
     if (model_type_category_ == MODEL_TYPE_CATEGORY_MULTIMODAL) {
       multi_modal_runner_->stop();

@@ -357,8 +326,6 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
             "appendImagesInput", ExecuTorchLlmJni::append_images_input),
         makeNativeMethod(
             "appendTextInput", ExecuTorchLlmJni::append_text_input),
-        makeNativeMethod(
-            "generateFromPos", ExecuTorchLlmJni::generate_from_pos),
         makeNativeMethod("resetContext", ExecuTorchLlmJni::reset_context),
     });
   }
