@@ -27,20 +27,20 @@ static constexpr auto kDecoderStartTokenId = "decoder_start_token_id";
 static constexpr auto kEosId = "get_eos_id";
 static constexpr auto kMaxContextLen = "get_max_context_len";
 } // namespace
-Runner::Runner(
+WhisperRunner::WhisperRunner(
     const std::string& model_path,
     const std::string& tokenizer_json_path)
     : tokenizer_json_path_(tokenizer_json_path) {
   encoder_ = std::make_unique<WhisperEncoder>(model_path);
   decoder_ = std::make_unique<WhisperDecoder>(model_path);
   tokenizer_ = std::make_unique<tokenizers::HFTokenizer>();
 }
-bool Runner::is_loaded() const {
+bool WhisperRunner::is_loaded() const {
   return encoder_->is_method_loaded() && decoder_->is_method_loaded() &&
       tokenizer_->is_loaded() && sampler_;
 }

-Error Runner::load() {
+Error WhisperRunner::load() {
   if (is_loaded()) {
     return Error::Ok;
   }
@@ -108,12 +108,12 @@ Error Runner::load() {

   return Error::Ok;
 }
-uint64_t Runner::logits_to_token(
+uint64_t WhisperRunner::logits_to_token(
     const executorch::aten::Tensor& logits_tensor) {
   return sampler_->sample(logits_tensor.data_ptr<float>());
 }

-Error Runner::transcribe(
+Error WhisperRunner::transcribe(
     int32_t seq_len,
     std::vector<std::vector<char>>& inputs,
     std::function<void(const std::string&)> token_callback) {
@@ -184,7 +184,7 @@ Error Runner::transcribe(
   return Error::Ok;
 }

-Error Runner::print_performance() {
+Error WhisperRunner::print_performance() {
   ET_LOG(Info, "\tTotal Generated token:\t\t\t\t%ld", num_generated_token_);

   ET_LOG(
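For context, a minimal usage sketch of the renamed class, based only on the signatures visible in this diff. The include path, any enclosing namespace, the placeholder file paths, and the way audio features are packed into the byte buffers are assumptions, not taken from this change:

// Usage sketch only: exercises the WhisperRunner API visible in this diff.
// Header path, namespace, paths, and audio preprocessing are assumptions.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "whisper_runner.h"  // hypothetical include path

int main() {
  // Model and tokenizer paths are placeholders.
  WhisperRunner runner("whisper.pte", "tokenizer.json");

  // load() returns Error::Ok immediately when is_loaded() is already true.
  if (runner.load() != executorch::runtime::Error::Ok) {
    std::cerr << "failed to load encoder/decoder/tokenizer\n";
    return 1;
  }

  // transcribe() consumes preprocessed audio features packed as byte buffers
  // and streams each decoded token piece through the callback.
  std::vector<std::vector<char>> inputs;  // filled by feature extraction (not shown)
  runner.transcribe(
      /*seq_len=*/128, inputs, [](const std::string& piece) { std::cout << piece; });

  runner.print_performance();
  return 0;
}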