@@ -43,6 +43,7 @@ TextLLMRunner::TextLLMRunner(
       io_manager_(std::move(io_manager)),
       text_token_generator_(std::move(text_token_generator)),
       stats_(std::move(stats)),
+      pos_(0),
       temperature_(temperature) {
   // Note: This constructor assumes that text_prefiller and text_token_generator
   // already have references to the Module and TextDecoderRunner they need
@@ -70,9 +71,8 @@ Error TextLLMRunner::load() {
     ET_LOG(Info, format, __VA_ARGS__); \
   }
 
-Error TextLLMRunner::generate_from_pos(
+Error TextLLMRunner::generate(
     const std::string& prompt,
-    ET_UNUSED int64_t start_pos,
     const GenerationConfig& config,
     std::function<void(const std::string&)> token_callback,
     std::function<void(const Stats&)> stats_callback) {
@@ -217,15 +217,6 @@ Error TextLLMRunner::generate_from_pos(
   return Error::Ok;
 }
 
-Error TextLLMRunner::generate(
-    const std::string& prompt,
-    const GenerationConfig& config,
-    std::function<void(const std::string&)> token_callback,
-    std::function<void(const Stats&)> stats_callback) {
-  pos_ = 0;
-  return generate_from_pos(prompt, 0, config, token_callback, stats_callback);
-}
-
 Error TextLLMRunner::warmup(const std::string& prompt, int32_t max_new_tokens) {
   // Create a GenerationConfig for warmup
   GenerationConfig config{
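Caller-side usage sketch (not part of the diff): with generate_from_pos folded into generate(), callers no longer pass a start position; the runner tracks it internally in pos_. The snippet below is a minimal sketch under assumptions: the executorch::extension::llm namespace, the text_llm_runner.h header path, a max_new_tokens field on GenerationConfig, and an already loaded runner passed in by the caller are assumed here rather than taken from the diff.

#include <cstdio>
#include <executorch/extension/llm/runner/text_llm_runner.h>  // header path assumed

// Sketch: drive the consolidated generate() API. The caller is assumed to
// have constructed and loaded `runner` elsewhere.
void run_once(executorch::extension::llm::TextLLMRunner& runner) {
  executorch::extension::llm::GenerationConfig config;
  config.max_new_tokens = 64;  // field name assumed

  auto err = runner.generate(
      "Tell me a story.",
      config,
      /*token_callback=*/
      [](const std::string& piece) { std::printf("%s", piece.c_str()); },
      /*stats_callback=*/
      [](const executorch::extension::llm::Stats& stats) {
        // Inspect prefill/decode timing here.
        (void)stats;
      });
  (void)err;  // check for Error::Ok in real code
}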