# whisper-cpp server

Simple HTTP server. WAV files are passed to the inference model via HTTP requests.

```
./server -h

usage: ./bin/server [options]

options:
  -h,        --help              [default] show this help message and exit
  -t N,      --threads N         [4      ] number of threads to use during computation
  -p N,      --processors N      [1      ] number of processors to use during computation
  -ot N,     --offset-t N        [0      ] time offset in milliseconds
  -on N,     --offset-n N        [0      ] segment index offset
  -d  N,     --duration N        [0      ] duration of audio to process in milliseconds
  -mc N,     --max-context N     [-1     ] maximum number of text context tokens to store
  -ml N,     --max-len N         [0      ] maximum segment length in characters
  -sow,      --split-on-word     [false  ] split on word rather than on token
  -bo N,     --best-of N         [2      ] number of best candidates to keep
  -bs N,     --beam-size N       [-1     ] beam size for beam search
  -wt N,     --word-thold N      [0.01   ] word timestamp probability threshold
  -et N,     --entropy-thold N   [2.40   ] entropy threshold for decoder fail
  -lpt N,    --logprob-thold N   [-1.00  ] log probability threshold for decoder fail
  -debug,    --debug-mode        [false  ] enable debug mode (eg. dump log_mel)
  -tr,       --translate         [false  ] translate from source language to english
  -di,       --diarize           [false  ] stereo audio diarization
  -tdrz,     --tinydiarize       [false  ] enable tinydiarize (requires a tdrz model)
  -nf,       --no-fallback       [false  ] do not use temperature fallback while decoding
  -ps,       --print-special     [false  ] print special tokens
  -pc,       --print-colors      [false  ] print colors
  -pp,       --print-progress    [false  ] print progress
  -nt,       --no-timestamps     [false  ] do not print timestamps
  -l LANG,   --language LANG     [en     ] spoken language ('auto' for auto-detect)
  -dl,       --detect-language   [false  ] exit after automatically detecting language
             --prompt PROMPT     [       ] initial prompt
  -m FNAME,  --model FNAME       [models/ggml-base.en.bin] model path
  -oved D,   --ov-e-device DNAME [CPU    ] the OpenVINO device used for encode inference
  --host HOST,                   [127.0.0.1] Hostname/ip-adress for the server
  --port PORT,                   [8080   ] Port number for the server
```
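
A typical way to start the server, using only flags from the help output above (the model path is the default shown there; adjust it to wherever your ggml model file lives):

```
./server -m models/ggml-base.en.bin -t 8 --host 0.0.0.0 --port 8080
```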

## request examples

**/inference**
```
curl 127.0.0.1:8080/inference \
-H "Content-Type: multipart/form-data" \
-F file="@<file-path>" \
-F temperature="0.2" \
-F response-format="json"
```
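
The server expects WAV input, and whisper.cpp models operate on 16 kHz mono samples, so audio in another format can be converted first and then passed as the `file` field above. A minimal sketch, assuming `ffmpeg` is installed (`input.mp3` and `output.wav` are placeholder names):

```
# convert to 16-bit, 16 kHz, mono WAV before uploading
ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav
```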

**/load**
```
curl 127.0.0.1:8080/load \
-H "Content-Type: multipart/form-data" \
-F model="<path-to-model-file>"
```
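
To switch models at runtime, the two endpoints can be chained from the shell. The model path below is only an illustration (use whichever ggml model file you have downloaded); otherwise the requests are the same as the examples above:

```
# hypothetical model path: point this at a real ggml model file
curl 127.0.0.1:8080/load \
-H "Content-Type: multipart/form-data" \
-F model="models/ggml-medium.bin"

# subsequent inference requests now run against the newly loaded model
curl 127.0.0.1:8080/inference \
-H "Content-Type: multipart/form-data" \
-F file="@<file-path>" \
-F temperature="0.2" \
-F response-format="json"
```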