@@ -112,6 +112,7 @@ class ExecuTorchLlmCallbackJni
112112class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
113113 private:
114114 friend HybridBase;
115+ float temperature_;
115116 int model_type_category_;
116117 std::unique_ptr<llm::IRunner> runner_;
117118 std::unique_ptr<llm::MultimodalRunner> multi_modal_runner_;
@@ -167,20 +168,17 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
167168 runner_ = std::make_unique<example::Runner>(
168169 model_path->toStdString().c_str(),
169170 tokenizer_path->toStdString().c_str(),
170- temperature,
171171 data_path->toStdString().c_str());
172172 } else {
173173 runner_ = std::make_unique<example::Runner>(
174174 model_path->toStdString().c_str(),
175- tokenizer_path->toStdString().c_str(),
176- temperature);
175+ tokenizer_path->toStdString().c_str());
177176 }
178177#if defined(EXECUTORCH_BUILD_MEDIATEK)
179178 } else if (model_type_category == MODEL_TYPE_MEDIATEK_LLAMA) {
180179 runner_ = std::make_unique<MTKLlamaRunner>(
181180 model_path->toStdString().c_str(),
182- tokenizer_path->toStdString().c_str(),
183- temperature);
181+ tokenizer_path->toStdString().c_str());
184182 // Interpret the model type as LLM
185183 model_type_category_ = MODEL_TYPE_CATEGORY_LLM;
186184#endif
@@ -220,6 +218,7 @@ class ExecuTorchLlmJni : public facebook::jni::HybridClass<ExecuTorchLlmJni> {
220218 executorch::extension::llm::GenerationConfig config{
221219 .echo = static_cast<bool>(echo),
222220 .seq_len = seq_len,
221+ .temperature = temperature_,
223222 };
224223 runner_->generate (
225224 prompt->toStdString(),
0 commit comments