
Commit 68b7bc9

Author: litongmacos
Message: update and add whisper_params.cpp
Parent: e93346c

File tree: 4 files changed (+197 additions, -114 deletions)


CMakeLists.txt

Lines changed: 1 addition & 1 deletion

```diff
@@ -37,6 +37,6 @@ target_link_libraries(simplest whisper)
 add_executable(stream_components stream_components.cpp stream_components_audio.cpp stream_components_output.cpp stream_components_service.cpp)
 target_link_libraries(stream_components ${SDL2_LIBRARIES})
 
-add_executable(server server.cpp whisper_params.h common.cpp httplib.h json.hpp inference_handler.cpp)
+add_executable(server server.cpp common.cpp httplib.h json.hpp inference_handler.cpp whisper_params.cpp)
 target_link_libraries(server whisper)
 
```
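This change compiles the new whisper_params.cpp into the server target instead of listing the whisper_params.h header as a source file. A rebuild of just that target, assuming a standard CMake workflow and the cmake-build-debug directory used in the README below, would look like:

```shell
# Configure the build tree, then rebuild only the server binary.
# The directory name matches the ./cmake-build-debug/server path in the README.
cmake -B cmake-build-debug
cmake --build cmake-build-debug --target server
```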

README.md

Lines changed: 79 additions & 0 deletions

````diff
@@ -15,6 +15,85 @@ options:
 
 Simple http server. WAV Files are passed to the inference model via http requests.
 
+```
+./cmake-build-debug/server -m models/ggml-base.en.bin
+```
+
+```shell
+whisper_init_from_file_with_params_no_state: loading model from 'models/ggml-base.en.bin'
+whisper_model_load: loading model
+whisper_model_load: n_vocab = 51864
+whisper_model_load: n_audio_ctx = 1500
+whisper_model_load: n_audio_state = 512
+whisper_model_load: n_audio_head = 8
+whisper_model_load: n_audio_layer = 6
+whisper_model_load: n_text_ctx = 448
+whisper_model_load: n_text_state = 512
+whisper_model_load: n_text_head = 8
+whisper_model_load: n_text_layer = 6
+whisper_model_load: n_mels = 80
+whisper_model_load: ftype = 1
+whisper_model_load: qntvr = 0
+whisper_model_load: type = 2 (base)
+whisper_model_load: adding 1607 extra tokens
+whisper_model_load: n_langs = 99
+whisper_backend_init: using Metal backend
+ggml_metal_init: allocating
+ggml_metal_init: found device: Apple M2
+ggml_metal_init: picking default device: Apple M2
+ggml_metal_init: default.metallib not found, loading from source
+ggml_metal_init: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd
+ggml_metal_init: loading 'ggml-metal.metal'
+ggml_metal_init: GPU name: Apple M2
+ggml_metal_init: GPU family: MTLGPUFamilyApple8 (1008)
+ggml_metal_init: hasUnifiedMemory = true
+ggml_metal_init: recommendedMaxWorkingSetSize = 11453.25 MB
+ggml_metal_init: maxTransferRate = built-in GPU
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 156.68 MB, ( 157.20 / 11453.25)
+whisper_model_load: Metal buffer size = 156.67 MB
+whisper_model_load: model size = 156.58 MB
+whisper_backend_init: using Metal backend
+ggml_metal_init: allocating
+ggml_metal_init: found device: Apple M2
+ggml_metal_init: picking default device: Apple M2
+ggml_metal_init: default.metallib not found, loading from source
+ggml_metal_init: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd
+ggml_metal_init: loading 'ggml-metal.metal'
+ggml_metal_init: GPU name: Apple M2
+ggml_metal_init: GPU family: MTLGPUFamilyApple8 (1008)
+ggml_metal_init: hasUnifiedMemory = true
+ggml_metal_init: recommendedMaxWorkingSetSize = 11453.25 MB
+ggml_metal_init: maxTransferRate = built-in GPU
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 16.52 MB, ( 173.72 / 11453.25)
+whisper_init_state: kv self size = 16.52 MB
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 18.43 MB, ( 192.15 / 11453.25)
+whisper_init_state: kv cross size = 18.43 MB
+whisper_init_state: loading Core ML model from 'models/ggml-base.en-encoder.mlmodelc'
+whisper_init_state: first run on a device may take a while ...
+whisper_init_state: Core ML model loaded
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 0.02 MB, ( 196.51 / 11453.25)
+whisper_init_state: compute buffer (conv) = 5.67 MB
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 0.02 MB, ( 196.53 / 11453.25)
+whisper_init_state: compute buffer (cross) = 4.71 MB
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 0.02 MB, ( 196.54 / 11453.25)
+whisper_init_state: compute buffer (decode) = 96.41 MB
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 4.05 MB, ( 200.59 / 11453.25)
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 3.08 MB, ( 203.67 / 11453.25)
+ggml_metal_add_buffer: allocated 'backend ' buffer, size = 94.78 MB, ( 298.45 / 11453.25)
+
+whisper server listening at http://0.0.0.0:8080
+
+Received request: jfk.wav
+Successfully loaded jfk.wav
+
+system_info: n_threads = 4 / 8 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | METAL = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | CUDA = 0 | COREML = 1 | OPENVINO = 0 |
+
+handleInference: processing 'jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
+
+Running whisper.cpp inference on jfk.wav
+
+[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.
+```
 ```
 ./server -h
````
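The added README text shows the server logging "Received request: jfk.wav" but not the request itself. Assuming the server accepts multipart file uploads (the /inference path and the file field name below are guesses, not confirmed by this commit; check server.cpp and inference_handler.cpp for the actual route), a request could look like:

```shell
# Hypothetical upload: the endpoint path and form field name are assumptions.
# The port matches the "listening at http://0.0.0.0:8080" line in the log above.
curl http://127.0.0.1:8080/inference -F file=@jfk.wav
```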

whisper_params.cpp

Lines changed: 115 additions & 0 deletions (new file)

```cpp
#include "whisper_params.h"

// Standard headers used below, in case whisper_params.h does not pull them in.
#include <cstdio>   // fprintf
#include <cstdlib>  // exit
#include <string>   // std::string, std::stoi, std::stof

void whisper_print_usage(int /*argc*/, char **argv, const whisper_params &params,
                         const server_params &sparams) {
    fprintf(stderr, "\n");
    fprintf(stderr, "usage: %s [options]\n", argv[0]);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h,       --help             [default] show this help message and exit\n");
    fprintf(stderr, "  -t N,     --threads N        [%-7d] number of threads to use during computation\n", params.n_threads);
    fprintf(stderr, "  -p N,     --processors N     [%-7d] number of processors to use during computation\n", params.n_processors);
    fprintf(stderr, "  -ot N,    --offset-t N       [%-7d] time offset in milliseconds\n", params.offset_t_ms);
    fprintf(stderr, "  -on N,    --offset-n N       [%-7d] segment index offset\n", params.offset_n);
    fprintf(stderr, "  -d N,     --duration N       [%-7d] duration of audio to process in milliseconds\n", params.duration_ms);
    fprintf(stderr, "  -mc N,    --max-context N    [%-7d] maximum number of text context tokens to store\n", params.max_context);
    fprintf(stderr, "  -ml N,    --max-len N        [%-7d] maximum segment length in characters\n", params.max_len);
    fprintf(stderr, "  -sow,     --split-on-word    [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
    fprintf(stderr, "  -bo N,    --best-of N        [%-7d] number of best candidates to keep\n", params.best_of);
    fprintf(stderr, "  -bs N,    --beam-size N      [%-7d] beam size for beam search\n", params.beam_size);
    fprintf(stderr, "  -wt N,    --word-thold N     [%-7.2f] word timestamp probability threshold\n", params.word_thold);
    fprintf(stderr, "  -et N,    --entropy-thold N  [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
    fprintf(stderr, "  -lpt N,   --logprob-thold N  [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
    // fprintf(stderr, "  -su,      --speed-up         [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
    fprintf(stderr, "  -debug,   --debug-mode       [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
    fprintf(stderr, "  -tr,      --translate        [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
    fprintf(stderr, "  -di,      --diarize          [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
    fprintf(stderr, "  -tdrz,    --tinydiarize      [%-7s] enable tinydiarize (requires a tdrz model)\n", params.tinydiarize ? "true" : "false");
    fprintf(stderr, "  -nf,      --no-fallback      [%-7s] do not use temperature fallback while decoding\n", params.no_fallback ? "true" : "false");
    fprintf(stderr, "  -ps,      --print-special    [%-7s] print special tokens\n", params.print_special ? "true" : "false");
    fprintf(stderr, "  -pc,      --print-colors     [%-7s] print colors\n", params.print_colors ? "true" : "false");
    fprintf(stderr, "  -pp,      --print-progress   [%-7s] print progress\n", params.print_progress ? "true" : "false");
    fprintf(stderr, "  -nt,      --no-timestamps    [%-7s] do not print timestamps\n", params.no_timestamps ? "true" : "false");
    fprintf(stderr, "  -l LANG,  --language LANG    [%-7s] spoken language ('auto' for auto-detect)\n", params.language.c_str());
    fprintf(stderr, "  -dl,      --detect-language  [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
    fprintf(stderr, "            --prompt PROMPT    [%-7s] initial prompt\n", params.prompt.c_str());
    fprintf(stderr, "  -m FNAME, --model FNAME      [%-7s] model path\n", params.model.c_str());
    fprintf(stderr, "  -oved D,  --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
    // server params
    fprintf(stderr, "            --host HOST        [%-7s] hostname/IP address for the server\n", sparams.hostname.c_str());
    fprintf(stderr, "            --port PORT        [%-7d] port number for the server\n", sparams.port);
    fprintf(stderr, "  -ng,      --no-gpu           [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
    fprintf(stderr, "\n");
}

bool whisper_params_parse(int argc, char **argv, whisper_params &params, server_params &sparams) {
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];

        if (arg == "-h" || arg == "--help") {
            whisper_print_usage(argc, argv, params, sparams);
            exit(0);
        }
        else if (arg == "-t"    || arg == "--threads")         { params.n_threads      = std::stoi(argv[++i]); }
        else if (arg == "-p"    || arg == "--processors")      { params.n_processors   = std::stoi(argv[++i]); }
        else if (arg == "-ot"   || arg == "--offset-t")        { params.offset_t_ms    = std::stoi(argv[++i]); }
        else if (arg == "-on"   || arg == "--offset-n")        { params.offset_n       = std::stoi(argv[++i]); }
        else if (arg == "-d"    || arg == "--duration")        { params.duration_ms    = std::stoi(argv[++i]); }
        else if (arg == "-mc"   || arg == "--max-context")     { params.max_context    = std::stoi(argv[++i]); }
        else if (arg == "-ml"   || arg == "--max-len")         { params.max_len        = std::stoi(argv[++i]); }
        else if (arg == "-bo"   || arg == "--best-of")         { params.best_of        = std::stoi(argv[++i]); }
        else if (arg == "-bs"   || arg == "--beam-size")       { params.beam_size      = std::stoi(argv[++i]); }
        else if (arg == "-wt"   || arg == "--word-thold")      { params.word_thold     = std::stof(argv[++i]); }
        else if (arg == "-et"   || arg == "--entropy-thold")   { params.entropy_thold  = std::stof(argv[++i]); }
        else if (arg == "-lpt"  || arg == "--logprob-thold")   { params.logprob_thold  = std::stof(argv[++i]); }
        // else if (arg == "-su" || arg == "--speed-up")       { params.speed_up = true; }
        else if (arg == "-debug"|| arg == "--debug-mode")      { params.debug_mode     = true; }
        else if (arg == "-tr"   || arg == "--translate")       { params.translate      = true; }
        else if (arg == "-di"   || arg == "--diarize")         { params.diarize        = true; }
        else if (arg == "-tdrz" || arg == "--tinydiarize")     { params.tinydiarize    = true; }
        else if (arg == "-sow"  || arg == "--split-on-word")   { params.split_on_word  = true; }
        else if (arg == "-nf"   || arg == "--no-fallback")     { params.no_fallback    = true; }
        else if (arg == "-fp"   || arg == "--font-path")       { params.font_path      = argv[++i]; }
        else if (arg == "-ps"   || arg == "--print-special")   { params.print_special  = true; }
        else if (arg == "-pc"   || arg == "--print-colors")    { params.print_colors   = true; }
        else if (arg == "-pp"   || arg == "--print-progress")  { params.print_progress = true; }
        else if (arg == "-nt"   || arg == "--no-timestamps")   { params.no_timestamps  = true; }
        else if (arg == "-l"    || arg == "--language")        { params.language       = argv[++i]; }
        else if (arg == "-dl"   || arg == "--detect-language") { params.detect_language = true; }
        else if (arg == "--prompt")                            { params.prompt         = argv[++i]; }
        else if (arg == "-m"    || arg == "--model")           { params.model          = argv[++i]; }
        else if (arg == "-oved" || arg == "--ov-e-device")     { params.openvino_encode_device = argv[++i]; }
        // server params
        else if (arg == "--port")                              { sparams.port     = std::stoi(argv[++i]); }
        else if (arg == "--host")                              { sparams.hostname = argv[++i]; }
        // note: "--port" is already handled above, so this branch only ever fires for "-ad"
        else if (arg == "-ad"   || arg == "--port")            { params.openvino_encode_device = argv[++i]; }
        else if (arg == "-ng"   || arg == "--no-gpu")          { params.use_gpu = false; }
        else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            whisper_print_usage(argc, argv, params, sparams);
            exit(0);
        }
    }

    return true;
}
```
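The flags handled by whisper_params_parse can be combined on one command line. An illustrative invocation (every flag below appears in the parser above; the values are examples, not defaults):

```shell
# Illustrative server invocation using flags from whisper_params_parse.
./cmake-build-debug/server \
    -m models/ggml-base.en.bin \
    -t 8 -p 1 \
    -l auto \
    --host 127.0.0.1 --port 8080
```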
