Skip to content

Commit 47acc87

Browse files
authored
Java: Remove generateFromPos API (#14102)
No longer used
1 parent 5d63bad commit 47acc87

File tree

2 files changed

+2
-24
lines changed
  • examples/demo-apps/android/LlamaDemo/app/src/main/java/com/example/executorchllamademo
  • extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm

2 files changed

+2
-24
lines changed

examples/demo-apps/android/LlamaDemo/app/src/main/java/com/example/executorchllamademo/MainActivity.java

Lines changed: 2 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -778,12 +778,8 @@ public void run() {
778778
mCurrentSettingsFields.getModelType(),
779779
mCurrentSettingsFields.getBackendType())
780780
== ModelUtils.VISION_MODEL) {
781-
mModule.generateFromPos(
782-
finalPrompt,
783-
ModelUtils.VISION_MODEL_SEQ_LEN,
784-
startPos,
785-
MainActivity.this,
786-
false);
781+
mModule.generate(
782+
finalPrompt, ModelUtils.VISION_MODEL_SEQ_LEN, MainActivity.this, false);
787783
} else if (mCurrentSettingsFields.getModelType() == ModelType.LLAMA_GUARD_3) {
788784
String llamaGuardPromptForClassification =
789785
PromptFormat.getFormattedLlamaGuardPrompt(rawPrompt);

extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm/LlmModule.java

Lines changed: 0 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -218,24 +218,6 @@ public long prefillPrompt(String prompt, long startPos, int bos, int eos) {
218218
// returns a tuple of (status, updated startPos)
219219
private native int appendTextInput(String prompt, int bos, int eos);
220220

221-
/**
222-
* Generate tokens from the given prompt, starting from the given position.
223-
*
224-
* <p>This is a deprecated API. Please use {@link #generate(String, int, LlmCallback, boolean)}
225-
*
226-
* @param prompt The text prompt to LLaVA.
227-
* @param seqLen The total sequence length, including the prompt tokens and new tokens.
228-
* @param startPos The starting position in KV cache of the input in the LLM.
229-
* @param callback callback object to receive results.
230-
* @param echo indicate whether to echo the input prompt or not.
231-
* @return The error code.
232-
*/
233-
@Deprecated
234-
public int generateFromPos(
235-
String prompt, int seqLen, long startPos, LlmCallback callback, boolean echo) {
236-
return generate(prompt, seqLen, callback, echo);
237-
}
238-
239221
/**
240222
* Reset the context of the LLM. This will clear the KV cache and reset the state of the LLM.
241223
*

0 commit comments

Comments (0)