Skip to content

Commit 8bafcf0

Browse files
committed
feat: add HTTP server mode with JSON and Prometheus endpoints (#34, #53)
1 parent d09a274 commit 8bafcf0

File tree

3 files changed

+214
-0
lines changed

3 files changed

+214
-0
lines changed

readme.md

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ Usage: macmon [OPTIONS] [COMMAND]
6464

6565
Commands:
6666
pipe Output metrics in JSON format
67+
serve Serve metrics over HTTP
6768
debug Print debug information
6869
help Print this message or the help of the given subcommand(s)
6970

@@ -124,6 +125,59 @@ This will collect 10 samples with an update interval of 500 milliseconds.
124125
}
125126
```
126127

128+
## 🌐 HTTP Server
129+
130+
You can use the `serve` subcommand to expose metrics over HTTP. This is useful for integrating with monitoring systems like [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/).
131+
132+
```sh
133+
macmon serve # default port 9090, interval 1000ms
134+
macmon serve -p 8080 # custom port
135+
macmon serve -i 500 # sampling interval 500ms
136+
macmon serve & # run in background
137+
```
138+
139+
Two endpoints are available:
140+
141+
| Endpoint | Format | Description |
142+
|---|---|---|
143+
| `GET /json` | JSON | Current metrics snapshot — the `pipe` JSON plus embedded `soc` info and a `timestamp` field |
144+
| `GET /metrics` | Prometheus | Metrics in [Prometheus text format](https://prometheus.io/docs/instrumenting/exposition_formats/) |
145+
146+
### Prometheus / Grafana setup
147+
148+
Add a scrape target to your `prometheus.yml`:
149+
150+
```yaml
151+
scrape_configs:
152+
- job_name: macmon
153+
static_configs:
154+
- targets: ["localhost:9090"]
155+
```
156+
157+
Then import or build a Grafana dashboard querying metrics such as:
158+
159+
```
160+
macmon_cpu_power_watts{chip="Apple M3 Pro"}
161+
macmon_ecpu_usage_ratio{chip="Apple M3 Pro"}
162+
macmon_memory_ram_used_bytes{chip="Apple M3 Pro"}
163+
```
164+
165+
### Prometheus output example
166+
167+
```
168+
# HELP macmon_cpu_temp_celsius Average CPU temperature in Celsius
169+
# TYPE macmon_cpu_temp_celsius gauge
170+
macmon_cpu_temp_celsius{chip="Apple M3 Pro"} 47.3
171+
172+
# HELP macmon_cpu_power_watts CPU power consumption in Watts
173+
# TYPE macmon_cpu_power_watts gauge
174+
macmon_cpu_power_watts{chip="Apple M3 Pro"} 8.42
175+
176+
# HELP macmon_ecpu_usage_ratio Efficiency CPU cluster utilization (0–1)
177+
# TYPE macmon_ecpu_usage_ratio gauge
178+
macmon_ecpu_usage_ratio{chip="Apple M3 Pro"} 0.083
179+
```
180+
127181
## 📦 Build from Source
128182

129183
1. Install [Rust toolchain](https://www.rust-lang.org/tools/install)

src/main.rs

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
use clap::{CommandFactory, Parser, Subcommand, parser::ValueSource};
22
use macmon::{App, Sampler, debug};
33
use std::error::Error;
4+
use std::sync::{Arc, Mutex};
5+
use std::thread;
6+
7+
mod serve;
48

59
#[derive(Debug, Subcommand)]
610
enum Commands {
@@ -16,6 +20,13 @@ enum Commands {
1620
soc_info: bool,
1721
},
1822

23+
/// Serve metrics over HTTP (JSON at /json and Prometheus at /metrics)
24+
Serve {
25+
/// Port to listen on
26+
#[arg(short, long, default_value_t = 9090)]
27+
port: u16,
28+
},
29+
1930
/// Print debug information
2031
Debug,
2132
}
@@ -61,6 +72,27 @@ fn main() -> Result<(), Box<dyn Error>> {
6172
}
6273
}
6374
}
75+
Some(Commands::Serve { port }) => {
76+
let mut sampler = Sampler::new()?;
77+
let soc = Arc::new(sampler.get_soc_info().clone());
78+
let shared: serve::SharedMetrics = Arc::new(Mutex::new(None));
79+
80+
let shared_http = Arc::clone(&shared);
81+
let soc_http = Arc::clone(&soc);
82+
let port = *port;
83+
thread::spawn(move || {
84+
if let Err(e) = serve::run(port, shared_http, soc_http) {
85+
eprintln!("server error: {e}");
86+
}
87+
});
88+
89+
loop {
90+
match sampler.get_metrics(args.interval.max(100)) {
91+
Ok(m) => *shared.lock().unwrap() = Some(m),
92+
Err(e) => eprintln!("sampling error: {e}"),
93+
}
94+
}
95+
}
6496
Some(Commands::Debug) => debug::print_debug()?,
6597
_ => {
6698
let mut app = App::new()?;

src/serve.rs

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
use std::io::{Read, Write};
2+
use std::net::{TcpListener, TcpStream};
3+
use std::sync::{Arc, Mutex};
4+
use std::thread;
5+
6+
use macmon::{Metrics, SocInfo};
7+
8+
pub type SharedMetrics = Arc<Mutex<Option<Metrics>>>;
9+
10+
#[rustfmt::skip]
11+
fn to_prometheus(m: &Metrics, soc: &SocInfo) -> String {
12+
let chip = &soc.chip_name;
13+
let l = format!(r#"chip="{chip}""#);
14+
15+
macro_rules! gauge {
16+
($out:expr, $name:literal, $help:literal, $value:expr) => {
17+
$out.push_str(&format!(
18+
"# HELP {} {}\n# TYPE {} gauge\n{}{{{l}}} {}\n\n",
19+
$name, $help, $name, $name, $value
20+
));
21+
};
22+
}
23+
24+
let mut out = String::new();
25+
gauge!(out, "macmon_cpu_temp_celsius", "Average CPU temperature in Celsius", m.temp.cpu_temp_avg);
26+
gauge!(out, "macmon_gpu_temp_celsius", "Average GPU temperature in Celsius", m.temp.gpu_temp_avg);
27+
gauge!(out, "macmon_memory_ram_total_bytes", "Total RAM in bytes", m.memory.ram_total);
28+
gauge!(out, "macmon_memory_ram_used_bytes", "Used RAM in bytes", m.memory.ram_usage);
29+
gauge!(out, "macmon_memory_swap_total_bytes", "Total swap in bytes", m.memory.swap_total);
30+
gauge!(out, "macmon_memory_swap_used_bytes", "Used swap in bytes", m.memory.swap_usage);
31+
gauge!(out, "macmon_ecpu_freq_mhz", "Efficiency CPU cluster frequency in MHz", m.ecpu_usage.0);
32+
gauge!(out, "macmon_ecpu_usage_ratio", "Efficiency CPU cluster utilization (0–1)", m.ecpu_usage.1);
33+
gauge!(out, "macmon_pcpu_freq_mhz", "Performance CPU cluster frequency in MHz", m.pcpu_usage.0);
34+
gauge!(out, "macmon_pcpu_usage_ratio", "Performance CPU cluster utilization (0–1)", m.pcpu_usage.1);
35+
gauge!(out, "macmon_gpu_freq_mhz", "GPU frequency in MHz", m.gpu_usage.0);
36+
gauge!(out, "macmon_gpu_usage_ratio", "GPU utilization (0–1)", m.gpu_usage.1);
37+
gauge!(out, "macmon_cpu_power_watts", "CPU power consumption in Watts", m.cpu_power);
38+
gauge!(out, "macmon_gpu_power_watts", "GPU power consumption in Watts", m.gpu_power);
39+
gauge!(out, "macmon_ane_power_watts", "Apple Neural Engine power consumption in Watts", m.ane_power);
40+
gauge!(out, "macmon_all_power_watts", "Combined CPU+GPU+ANE power consumption in Watts", m.all_power);
41+
gauge!(out, "macmon_sys_power_watts", "Total system power consumption in Watts", m.sys_power);
42+
gauge!(out, "macmon_ram_power_watts", "RAM power consumption in Watts", m.ram_power);
43+
gauge!(out, "macmon_gpu_ram_power_watts", "GPU RAM power consumption in Watts", m.gpu_ram_power);
44+
out
45+
}
46+
47+
fn to_json(m: &Metrics, soc: &SocInfo) -> String {
48+
let mut doc = serde_json::to_value(m).unwrap_or_default();
49+
doc["soc"] = serde_json::to_value(soc).unwrap_or_default();
50+
doc["timestamp"] = serde_json::to_value(chrono::Utc::now().to_rfc3339()).unwrap_or_default();
51+
serde_json::to_string(&doc).unwrap_or_default()
52+
}
53+
54+
/// Extract the request path from an incoming HTTP request, dropping any
/// query string. Best-effort: a single read is assumed to hold the whole
/// request line; anything malformed yields `None`.
fn read_path(stream: &mut TcpStream) -> Option<String> {
  let mut buf = [0u8; 2048];
  let n = stream.read(&mut buf).ok()?;
  let head = std::str::from_utf8(&buf[..n]).ok()?;
  // The request line looks like: "GET /path?query HTTP/1.1".
  let target = head.lines().next()?.split_whitespace().nth(1)?;
  let path = match target.split_once('?') {
    Some((before_query, _)) => before_query,
    None => target,
  };
  Some(path.to_owned())
}
61+
62+
/// Write a minimal HTTP/1.1 response with the given status, content type
/// and body, then let the connection close.
fn write_response(stream: &mut TcpStream, status: u16, content_type: &str, body: String) {
  let reason = match status {
    404 => "Not Found",
    503 => "Service Unavailable",
    // 200 and anything unexpected both read as "OK".
    _ => "OK",
  };
  let response = format!(
    "HTTP/1.1 {status} {reason}\r\nContent-Type: {content_type}\r\nContent-Length: {len}\r\nConnection: close\r\n\r\n{body}",
    len = body.len()
  );
  // The peer may already be gone; a failed write is not actionable here.
  let _ = stream.write_all(response.as_bytes());
}
77+
78+
fn handle_conn(mut stream: TcpStream, shared: SharedMetrics, soc: Arc<SocInfo>) {
79+
let path = match read_path(&mut stream) {
80+
Some(p) => p,
81+
None => return,
82+
};
83+
84+
let lock = shared.lock().unwrap();
85+
86+
let Some(m) = lock.as_ref() else {
87+
drop(lock);
88+
write_response(&mut stream, 503, "application/json", r#"{"error":"no data yet"}"#.to_string());
89+
return;
90+
};
91+
92+
match path.as_str() {
93+
"/json" => {
94+
let body = to_json(m, &soc);
95+
drop(lock);
96+
write_response(&mut stream, 200, "application/json", body);
97+
}
98+
"/metrics" => {
99+
let body = to_prometheus(m, &soc);
100+
drop(lock);
101+
write_response(&mut stream, 200, "text/plain; version=0.0.4; charset=utf-8", body);
102+
}
103+
_ => {
104+
drop(lock);
105+
write_response(&mut stream, 404, "application/json", r#"{"error":"not found"}"#.to_string());
106+
}
107+
}
108+
}
109+
110+
pub fn run(
111+
port: u16,
112+
shared: SharedMetrics,
113+
soc: Arc<SocInfo>,
114+
) -> Result<(), Box<dyn std::error::Error>> {
115+
let listener = TcpListener::bind(format!("0.0.0.0:{port}"))?;
116+
eprintln!("macmon serving on http://localhost:{port}");
117+
eprintln!(" GET /json → JSON metrics");
118+
eprintln!(" GET /metrics → Prometheus format");
119+
120+
for stream in listener.incoming() {
121+
let Ok(stream) = stream else { continue };
122+
let shared = Arc::clone(&shared);
123+
let soc = Arc::clone(&soc);
124+
thread::spawn(move || handle_conn(stream, shared, soc));
125+
}
126+
127+
Ok(())
128+
}

0 commit comments

Comments
 (0)