File tree: 5 files changed, +10 −6 lines changed
@@ -23,14 +23,14 @@ speaker = "speaker2"
2323# lang = "zh"
2424# api_key = "gsk_xxx"
2525# model = "whisper-large-v3-turbo"
26+ # vad_url = "http://localhost:9093/v1/audio/vad"
2627# vad_realtime_url = "ws://localhost:9093/v1/audio/realtime_vad"
2728
2829[asr ]
2930url = " https://whisper.gaia.domains/v1/audio/transcriptions"
3031lang = " auto"
3132prompt = " Hello\n 你好\n (noise)\n (bgm)\n (silence)\n "
32- # vad_url = "http://localhost:9093/v1/audio/vad"
33- vad_realtime_url = " ws://localhost:9093/v1/audio/realtime_vad"
33+ vad_url = " http://localhost:9093/v1/audio/vad"
3434
3535# [llm]
3636# llm_chat_url = "https://api.groq.com/openai/v1/chat/completions"
Original file line number Diff line number Diff line change @@ -15,7 +15,8 @@ lang = "en"
1515prompt = " Hello\n 你好\n (noise)\n (bgm)\n (silence)\n "
1616# Requires a local Silero VAD server: https://github.com/second-state/silero_vad_server
1717# Default port is 8000, but some setups may use 9093. Update the port below if needed.
18- vad_realtime_url = " ws://localhost:8000/v1/audio/realtime_vad"
18+ vad_url = " http://localhost:8000/v1/audio/vad"
19+ # vad_realtime_url = "ws://localhost:8000/v1/audio/realtime_vad"
1920
2021[llm ]
2122llm_chat_url = " https://api.groq.com/openai/v1/chat/completions"
Original file line number Diff line number Diff line change @@ -12,7 +12,8 @@ speaker = "cooper"
1212url = " http://localhost:9092/v1/audio/transcriptions"
1313lang = " auto"
1414# Requires a local Silero VAD server at port 9093: https://github.com/second-state/silero_vad_server
15- vad_realtime_url = " ws://localhost:9093/v1/audio/realtime_vad"
15+ vad_url = " http://localhost:9093/v1/audio/vad"
16+ # vad_realtime_url = "ws://localhost:9093/v1/audio/realtime_vad"
1617
1718# Requires a local LlamaEdge API server at port 9091: https://llamaedge.com/docs/ai-models/llm/quick-start-llm
1819[llm ]
Original file line number Diff line number Diff line change @@ -14,7 +14,8 @@ model = "whisper-large-v3"
1414lang = " en"
1515prompt = " Hello\n 你好\n (noise)\n (bgm)\n (silence)\n "
1616# Requires a local Silero VAD server: https://github.com/second-state/silero_vad_server
17- vad_realtime_url = " ws://localhost:9093/v1/audio/realtime_vad"
17+ vad_url = " http://localhost:9093/v1/audio/vad"
18+ # vad_realtime_url = "ws://localhost:9093/v1/audio/realtime_vad"
1819
1920[llm ]
2021llm_chat_url = " https://api.groq.com/openai/v1/chat/completions"
Original file line number Diff line number Diff line change @@ -15,7 +15,8 @@ model = "whisper-large-v3"
1515lang = " en"
1616prompt = " Hello\n 你好\n (noise)\n (bgm)\n (silence)\n "
1717# Requires a local Silero VAD server at port 9093: https://github.com/second-state/silero_vad_server
18- vad_realtime_url = " ws://localhost:9093/v1/audio/realtime_vad"
18+ vad_url = " http://localhost:9093/v1/audio/vad"
19+ # vad_realtime_url = "ws://localhost:9093/v1/audio/realtime_vad"
1920
2021# Use Groq for fast LLM here
2122[llm ]
You can’t perform that action at this time.
0 commit comments