
Commit 717257a

Fix naming consistency of generateTokensXXX methods and add comments
1 parent 5f3b6c2 commit 717257a

3 files changed: +7 -6 lines changed

src/main/java/com/example/inference/InferenceEngine.java

Lines changed: 5 additions & 4 deletions
@@ -25,9 +25,10 @@
  * <p>
  * It provides unified logic for the following methods:
  * <ul>
- * <li>{@code generateTokensLlama} – for LLaMA and Mistral models running on CPU</li>
- * <li>{@code generateTokensQwen3} – for Qwen3 models running on CPU</li>
- * <li>{@code generateTokensGPU} – for models executed on GPU</li>
+ * <li>{@link #generateTokensLlama} – for LLaMA and Mistral models running on CPU</li>
+ * <li>{@link #generateTokensGPULlama} – for LLaMA and Mistral models executed on GPU</li>
+ * <li>{@link #generateTokensQwen3} – for Qwen3 models running on CPU</li>
+ * <li>{@link #generateTokensGPUQwen3} – for Qwen3 models executed on GPU</li>
  * </ul>
  * </p>
  */
@@ -213,7 +214,7 @@ public static List<Integer> generateTokensQwen3(Model model, State state, int st
         return generatedTokens;
     }
 
-    public static List<Integer> generateTokensGPU(Model model, State state, int startPosition, List<Integer> promptTokens, Set<Integer> stopTokens, int maxTokens, Sampler sampler, boolean echo,
+    public static List<Integer> generateTokensGPULlama(Model model, State state, int startPosition, List<Integer> promptTokens, Set<Integer> stopTokens, int maxTokens, Sampler sampler, boolean echo,
             IntConsumer onTokenGenerated, TornadoVMMasterPlan tornadoVMPlan) {
         // === Setup and Initialization ===
         long startNanos = System.nanoTime();

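The rename is internal to InferenceEngine: model classes still expose generateTokensGPU and delegate to the architecture-specific engine method (see Llama.java and Mistral.java below), so call sites stay architecture-agnostic. A minimal sketch of such a call site, assuming model, state, sampler, and tornadoVMPlan are constructed elsewhere (the variable names here are illustrative, not from this commit):

    // GPU generation through the polymorphic Model API; the subclass picks
    // InferenceEngine.generateTokensGPULlama or generateTokensGPUQwen3 internally.
    List<Integer> generated = model.generateTokensGPU(
            state,            // per-sequence inference state
            startPosition,    // index where generation starts
            promptTokens,     // tokenized prompt
            stopTokens,       // token IDs that terminate generation
            maxTokens,        // generation budget
            sampler,          // sampling strategy
            false,            // echo: do not re-emit prompt tokens
            token -> {},      // onTokenGenerated callback (no-op here)
            tornadoVMPlan);   // TornadoVM execution plan for the GPU path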
src/main/java/com/example/model/llama/Llama.java

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ public List<Integer> generateTokens(State state, int startPosition, List<Integer
     @Override
     public List<Integer> generateTokensGPU(State state, int startPosition, List<Integer> promptTokens, Set<Integer> stopTokens, int maxTokens, Sampler sampler, boolean echo,
             IntConsumer onTokenGenerated, TornadoVMMasterPlan tornadoVMPlan) {
-        return InferenceEngine.generateTokensGPU(this, state, startPosition, promptTokens, stopTokens, maxTokens, sampler, echo, onTokenGenerated, tornadoVMPlan);
+        return InferenceEngine.generateTokensGPULlama(this, state, startPosition, promptTokens, stopTokens, maxTokens, sampler, echo, onTokenGenerated, tornadoVMPlan);
     }
 }

src/main/java/com/example/model/mistral/Mistral.java

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ public List<Integer> generateTokens(State state, int startPosition, List<Integer
     @Override
     public List<Integer> generateTokensGPU(State state, int startPosition, List<Integer> promptTokens, Set<Integer> stopTokens, int maxTokens, Sampler sampler, boolean echo,
             IntConsumer onTokenGenerated, TornadoVMMasterPlan tornadoVMPlan) {
-        return InferenceEngine.generateTokensGPU(this, state, startPosition, promptTokens, stopTokens, maxTokens, sampler, echo, onTokenGenerated, tornadoVMPlan);
+        return InferenceEngine.generateTokensGPULlama(this, state, startPosition, promptTokens, stopTokens, maxTokens, sampler, echo, onTokenGenerated, tornadoVMPlan);
     }
 
 }
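By the same convention, a Qwen3 model class would delegate its GPU path to InferenceEngine.generateTokensGPUQwen3. The override below is a hypothetical sketch mirroring the Llama and Mistral changes above; it is not part of this commit, and its parameter list is assumed to match generateTokensGPULlama:

    // Hypothetical Qwen3 override, mirroring Llama.java and Mistral.java above.
    @Override
    public List<Integer> generateTokensGPU(State state, int startPosition, List<Integer> promptTokens, Set<Integer> stopTokens, int maxTokens, Sampler sampler, boolean echo,
            IntConsumer onTokenGenerated, TornadoVMMasterPlan tornadoVMPlan) {
        // Delegate to the Qwen3-specific GPU method named in the updated Javadoc.
        return InferenceEngine.generateTokensGPUQwen3(this, state, startPosition, promptTokens, stopTokens, maxTokens, sampler, echo, onTokenGenerated, tornadoVMPlan);
    }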
