Skip to content

Commit e77d566

Browse files
committed
some tweaks and cleanup
1 parent 636beac commit e77d566

File tree

3 files changed

+8
-4
lines changed

3 files changed

+8
-4
lines changed

otherarch/tts_adapter.cpp

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -697,7 +697,7 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
697697

698698
if(!inputs.quiet)
699699
{
700-
printf("\nTTS Generating (%d input tokens)...", prompt_inp.size());
700+
printf("\nTTS Processing (%d input tokens)...\n", prompt_inp.size());
701701
}
702702

703703
prompt_add(prompt_inp, model_ttc, "<|text_end|>\n<|audio_start|>\n", false, true);
@@ -771,6 +771,10 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
771771
output.status = 0;
772772
return output;
773773
}
774+
if(!inputs.quiet)
775+
{
776+
printf("\rTTS Generating (%d AudioTokens)", n_decode);
777+
}
774778
}
775779

776780
if(!inputs.quiet && ttsdebugmode==1)
@@ -818,7 +822,7 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
818822
audio[i] = 0.0f;
819823
}
820824
//add some silence at the end
821-
for (int i = 0; i < 24000/20; ++i) {
825+
for (int i = 0; i < 24000/10; ++i) {
822826
audio.push_back(0.0f);
823827
}
824828

src/llama-context.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) {
8787
//GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
8888

8989
if (!lctx.inp_out_ids) {
90-
LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__);
90+
//LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__);
9191
} else {
9292
const int64_t n_tokens = ubatch.n_tokens;
9393

src/llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11124,7 +11124,7 @@ static int llama_decode_impl(
1112411124

1112511125
GGML_ASSERT_CONTINUE(n_tokens_all <= cparams.n_batch);
1112611126

11127-
GGML_ASSERT_CONTINUE((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
11127+
//GGML_ASSERT_CONTINUE((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
1112811128

1112911129
if (lctx.t_compute_start_us == 0) {
1113011130
lctx.t_compute_start_us = ggml_time_us();

0 commit comments

Comments (0)