Skip to content

Commit 4cec3de

Browse files
committed
feat: adding STT->LLM->TTS
Fix Fix Fix Improve perf Adding concurrent pipeline Fixing clippy errors Fix
1 parent 56a4dde commit 4cec3de

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

48 files changed

+13288
-133
lines changed

Cargo.lock

Lines changed: 65 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 15 additions & 9 deletions
Original file line number · Diff line number · Diff line change
@@ -58,7 +58,10 @@ tracing-appender = "0.2"
5858
# CLI and utilities
5959
clap = { version = "4.5", features = ["derive"] }
6060
directories = "5.0" # Professional cross-platform directory discovery
61-
chrono = "0.4"
61+
chrono = { version = "0.4", features = ["serde"] }
62+
uuid = { version = "1.11", features = ["v4", "serde"] }
63+
walkdir = "2.5"
64+
regex = "1.11"
6265

6366
# HTTP client for model downloads
6467
reqwest = { version = "0.12", features = [
@@ -83,34 +86,37 @@ sysinfo = "0.33"
8386
# Platform integration
8487
handy-keys = "0.2.1" # Cross-platform global keyboard shortcuts
8588
enigo = "0.6" # Keyboard/mouse simulation for text injection
89+
llama-cpp-2 = { version = "0.1.138", default-features = false, optional = true }
8690

8791
# Unix-specific dependencies
8892
[target.'cfg(unix)'.dependencies]
8993
libc = "0.2"
9094

9195
[features]
92-
default = ["whisper-cpp", "onnx", "overlay-indicator"]
96+
default = ["whisper-cpp", "onnx", "llama-cpp", "overlay-indicator"]
9397

94-
# Model backends
98+
# Model backends (all default now)
9599
whisper-cpp = ["whisper-rs"]
96100
onnx = ["ort", "ort-sys", "ndarray"]
97101
candle = ["candle-core", "candle-nn", "candle-transformers"]
102+
llama-cpp = ["llama-cpp-2"]
98103

99-
# GPU acceleration backends (automatically enabled on macOS by default)
100-
metal = ["whisper-rs/metal"] # macOS GPU acceleration (M1/M2/M3/M4)
101-
cuda = ["whisper-rs/cuda"] # NVIDIA GPU acceleration
104+
# GPU acceleration backends (optional - use --features metal or --features cuda)
105+
metal = ["whisper-rs/metal", "llama-cpp-2?/metal"] # macOS GPU acceleration (M1/M2/M3/M4)
106+
cuda = ["whisper-rs/cuda", "llama-cpp-2?/cuda"] # NVIDIA GPU acceleration
102107
vulkan = ["whisper-rs/vulkan"] # Cross-platform GPU (AMD, Intel, etc)
103-
openblas = ["whisper-rs/openblas"] # CPU optimization
108+
openblas = ["whisper-rs/openblas", "llama-cpp-2?/openmp"] # CPU optimization
104109

105110
# VAD backends
106111
webrtc-vad = []
107112
cargo-clippy = []
108113

109-
# Features
114+
# UI features
110115
tui = ["ratatui", "crossterm"]
111116
gpu = []
112117
overlay-indicator = ["eframe", "winit"]
113118

119+
114120
[target.'cfg(target_os = "macos")'.dependencies]
115121
core-graphics = "0.23"
116122
core-foundation = "0.9"
@@ -146,7 +152,7 @@ features = ["copy-dylibs"] # Copy ONNX Runtime binaries to target directory
146152
version = "2.0.0-rc.11"
147153
optional = true
148154
default-features = false
149-
features = ["download-binaries", "tls-rustls"] # Download and use rustls for TLS
155+
features = ["download-binaries", "tls-rustls", "coreml"] # Added coreml for macOS GPU acceleration
150156

151157
[dependencies.ndarray]
152158
version = "0.16"

0 commit comments

Comments (0)