Skip to content

Commit 33e38dd

Browse files
committed
fix ci with non-arm target
Signed-off-by: Yujong Lee <yujonglee.dev@gmail.com>
1 parent b23c644 commit 33e38dd

File tree

5 files changed

+94
-56
lines changed

5 files changed

+94
-56
lines changed

.github/workflows/desktop_ci.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ jobs:
9898
--exclude cactus \
9999
--exclude transcribe-cactus \
100100
--exclude llm-cactus \
101+
--exclude local-llm-core \
101102
--exclude tauri-plugin-analytics\
102103
--exclude tauri-plugin-apple-calendar \
103104
--exclude tauri-plugin-apple-contact \

crates/local-llm-core/Cargo.toml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ specta = ["dep:specta"]
99
[dependencies]
1010
hypr-file = { workspace = true }
1111
hypr-gguf = { workspace = true }
12-
hypr-llm-cactus = { workspace = true }
1312
hypr-lmstudio = { workspace = true }
1413

1514
axum = { workspace = true }
@@ -22,3 +21,6 @@ tower-http = { workspace = true, features = ["cors"] }
2221
tracing = { workspace = true }
2322

2423
specta = { workspace = true, optional = true }
24+
25+
[target.'cfg(target_arch = "aarch64")'.dependencies]
26+
hypr-llm-cactus = { workspace = true }

crates/local-llm-core/src/error.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ pub enum Error {
66
IoError(#[from] std::io::Error),
77
#[error(transparent)]
88
LmStudioError(#[from] hypr_lmstudio::Error),
9+
#[cfg(target_arch = "aarch64")]
910
#[error(transparent)]
1011
InferenceError(#[from] hypr_llm_cactus::Error),
1112
#[error("Model not downloaded")]

crates/local-llm-core/src/model.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,13 @@
1+
#[cfg(target_arch = "aarch64")]
12
pub static SUPPORTED_MODELS: &[SupportedModel] = &[
23
SupportedModel::Llama3p2_3bQ4,
34
SupportedModel::HyprLLM,
45
SupportedModel::Gemma3_4bQ4,
56
];
67

8+
#[cfg(not(target_arch = "aarch64"))]
9+
pub static SUPPORTED_MODELS: &[SupportedModel] = &[];
10+
711
#[derive(serde::Serialize, serde::Deserialize)]
812
#[cfg_attr(feature = "specta", derive(specta::Type))]
913
pub struct ModelInfo {
Lines changed: 85 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -1,75 +1,105 @@
1-
use std::net::Ipv4Addr;
2-
use std::path::Path;
1+
#[cfg(target_arch = "aarch64")]
2+
mod inner {
3+
use std::net::Ipv4Addr;
4+
use std::path::Path;
5+
6+
use axum::http::StatusCode;
7+
use axum::{Router, error_handling::HandleError};
8+
use hypr_llm_cactus::{CompleteService, ModelManagerBuilder};
9+
use tokio::net::TcpListener;
10+
use tower_http::cors::CorsLayer;
11+
12+
use crate::{Error, ModelSelection};
13+
14+
pub struct LlmServer {
15+
base_url: String,
16+
shutdown_tx: tokio::sync::watch::Sender<()>,
17+
task: tokio::task::JoinHandle<()>,
18+
}
319

4-
use axum::http::StatusCode;
5-
use axum::{Router, error_handling::HandleError};
6-
use hypr_llm_cactus::{CompleteService, ModelManagerBuilder};
7-
use tokio::net::TcpListener;
8-
use tower_http::cors::CorsLayer;
20+
impl LlmServer {
21+
pub async fn start(selection: &ModelSelection, models_dir: &Path) -> Result<Self, Error> {
22+
let file_path = selection.file_path(models_dir);
23+
let name = selection.display_name();
924

10-
use crate::{Error, ModelSelection};
25+
if !file_path.exists() {
26+
return Err(Error::ModelNotDownloaded);
27+
}
1128

12-
pub struct LlmServer {
13-
base_url: String,
14-
shutdown_tx: tokio::sync::watch::Sender<()>,
15-
task: tokio::task::JoinHandle<()>,
16-
}
29+
let manager = ModelManagerBuilder::default()
30+
.register(name.clone(), file_path)
31+
.default_model(name)
32+
.build();
1733

18-
impl LlmServer {
19-
pub async fn start(selection: &ModelSelection, models_dir: &Path) -> Result<Self, Error> {
20-
let file_path = selection.file_path(models_dir);
21-
let name = selection.display_name();
34+
let service = HandleError::new(CompleteService::new(manager), handle_error);
2235

23-
if !file_path.exists() {
24-
return Err(Error::ModelNotDownloaded);
25-
}
36+
let router = Router::new()
37+
.route_service("/v1/chat/completions", service)
38+
.layer(CorsLayer::permissive());
2639

27-
let manager = ModelManagerBuilder::default()
28-
.register(name.clone(), file_path)
29-
.default_model(name)
30-
.build();
40+
let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0u16)).await?;
41+
let addr = listener.local_addr()?;
42+
let base_url = format!("http://{}/v1", addr);
3143

32-
let service = HandleError::new(CompleteService::new(manager), handle_error);
44+
let (shutdown_tx, mut shutdown_rx) = tokio::sync::watch::channel(());
3345

34-
let router = Router::new()
35-
.route_service("/v1/chat/completions", service)
36-
.layer(CorsLayer::permissive());
46+
let task = tokio::spawn(async move {
47+
axum::serve(listener, router)
48+
.with_graceful_shutdown(async move {
49+
let _ = shutdown_rx.changed().await;
50+
})
51+
.await
52+
.ok();
53+
});
3754

38-
let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0u16)).await?;
39-
let addr = listener.local_addr()?;
40-
let base_url = format!("http://{}/v1", addr);
55+
tracing::info!(url = %base_url, "local LLM server started");
4156

42-
let (shutdown_tx, mut shutdown_rx) = tokio::sync::watch::channel(());
57+
Ok(Self {
58+
base_url,
59+
shutdown_tx,
60+
task,
61+
})
62+
}
4363

44-
let task = tokio::spawn(async move {
45-
axum::serve(listener, router)
46-
.with_graceful_shutdown(async move {
47-
let _ = shutdown_rx.changed().await;
48-
})
49-
.await
50-
.ok();
51-
});
64+
pub fn url(&self) -> &str {
65+
&self.base_url
66+
}
5267

53-
tracing::info!(url = %base_url, "local LLM server started");
68+
pub async fn stop(self) {
69+
let _ = self.shutdown_tx.send(());
70+
let _ = self.task.await;
71+
tracing::info!("local LLM server stopped");
72+
}
73+
}
5474

55-
Ok(Self {
56-
base_url,
57-
shutdown_tx,
58-
task,
59-
})
75+
async fn handle_error(err: hypr_llm_cactus::Error) -> (StatusCode, String) {
76+
(StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
6077
}
78+
}
6179

62-
pub fn url(&self) -> &str {
63-
&self.base_url
80+
#[cfg(not(target_arch = "aarch64"))]
81+
mod inner {
82+
use std::path::Path;
83+
84+
use crate::{Error, ModelSelection};
85+
86+
pub struct LlmServer {
87+
_private: (),
6488
}
6589

66-
pub async fn stop(self) {
67-
let _ = self.shutdown_tx.send(());
68-
let _ = self.task.await;
69-
tracing::info!("local LLM server stopped");
90+
impl LlmServer {
91+
pub async fn start(_selection: &ModelSelection, _models_dir: &Path) -> Result<Self, Error> {
92+
Err(Error::Other(
93+
"Local LLM is not supported on this platform".to_string(),
94+
))
95+
}
96+
97+
pub fn url(&self) -> &str {
98+
unreachable!()
99+
}
100+
101+
pub async fn stop(self) {}
70102
}
71103
}
72104

73-
async fn handle_error(err: hypr_llm_cactus::Error) -> (StatusCode, String) {
74-
(StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
75-
}
105+
pub use inner::LlmServer;

0 commit comments

Comments (0)