Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
e6c05b6
refactor(udp-tracker-server): [#1589] move average processing time ca…
josecelano Jun 19, 2025
d50948e
refactor: [#1598] make recalculate udp avg connect processing time me…
josecelano Jun 19, 2025
59fbb39
refactor: [#1598] make recalculate udp avg announce processing time m…
josecelano Jun 19, 2025
47c2949
refactor: [#1598] make recalculate udp avg scrape processing time met…
josecelano Jun 19, 2025
1c13b12
fix: [#1589] partially. Moving average calculated for each time series
josecelano Jun 19, 2025
164de92
refactor: [#1589] remove duplicate code
josecelano Jun 19, 2025
ed5f1e6
fix: [#1589] add dedicated metric for UDP request processing in movin…
josecelano Jun 19, 2025
384b887
feat(metrics): [#1589] add Avg (average) aggregate function
josecelano Jun 20, 2025
8fbcf90
refactor(metrics): extract collect_matching_samples to Metric<T> impl
josecelano Jun 20, 2025
f402b02
chore: remove deprecated comment
josecelano Jun 20, 2025
caa69ae
test: [#1589] remove unneeded test
josecelano Jun 20, 2025
ba3d8a9
fix: format
josecelano Jun 20, 2025
cd57f7a
fix: [#1589] use average aggregation for UDP processing time metrics
josecelano Jun 20, 2025
4c082fa
refactor: [#1589] make methods private
josecelano Jun 20, 2025
a9acca5
refactor: [#1589] rename methods and remove unused code
josecelano Jun 20, 2025
dc8d4a9
test: [#1589] add race condition test for UDP performance metrics
josecelano Jun 20, 2025
b423bf6
refactor: [#1589] improve readability of UDP performance metrics race…
josecelano Jun 20, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion packages/metrics/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ println!("{}", prometheus_output);
### Metric Aggregation

```rust
use torrust_tracker_metrics::metric_collection::aggregate::Sum;
use torrust_tracker_metrics::metric_collection::aggregate::{Sum, Avg};

// Sum all counter values matching specific labels
let total_requests = metrics.sum(
Expand All @@ -76,6 +76,14 @@ let total_requests = metrics.sum(
);

println!("Total requests: {:?}", total_requests);

// Calculate average of gauge values matching specific labels
let avg_response_time = metrics.avg(
&metric_name!("response_time_seconds"),
&[("endpoint", "/announce")].into(),
);

println!("Average response time: {:?}", avg_response_time);
```

## Architecture
Expand Down
294 changes: 294 additions & 0 deletions packages/metrics/src/metric/aggregate/avg.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,294 @@
use crate::counter::Counter;
use crate::gauge::Gauge;
use crate::label::LabelSet;
use crate::metric::aggregate::sum::Sum;
use crate::metric::Metric;

/// Aggregate function that computes the arithmetic mean of the samples of a
/// metric matching a given label-set criteria.
pub trait Avg {
    /// The numeric type of the computed average (e.g. `f64`).
    type Output;

    /// Returns the average of all samples whose label set matches
    /// `label_set_criteria`.
    ///
    /// NOTE(review): both provided impls return `0.0` when no sample matches,
    /// so callers cannot distinguish "no samples" from "average is zero" —
    /// confirm this is the intended contract.
    fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output;
}

/// Average over counter samples: sum of matching values divided by the number
/// of matching samples. Yields `0.0` when nothing matches, so the division by
/// the sample count can never be a division by zero.
impl Avg for Metric<Counter> {
    type Output = f64;

    fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output {
        // Only the number of matching samples is needed here; the sum is
        // delegated to the `Sum` aggregate below.
        let sample_count = self.collect_matching_samples(label_set_criteria).len();

        if sample_count == 0 {
            return 0.0;
        }

        let total = self.sum(label_set_criteria);

        // Counter sums are integers; converting to `f64` may lose precision
        // for very large totals, which is acceptable for an average.
        #[allow(clippy::cast_precision_loss)]
        let average = total as f64 / sample_count as f64;

        average
    }
}

/// Average over gauge samples: sum of matching values divided by the number
/// of matching samples. Yields `0.0` when nothing matches, so the division by
/// the sample count can never be a division by zero.
impl Avg for Metric<Gauge> {
    type Output = f64;

    fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output {
        // Only the number of matching samples is needed here; the sum is
        // delegated to the `Sum` aggregate below.
        let sample_count = self.collect_matching_samples(label_set_criteria).len();

        if sample_count == 0 {
            return 0.0;
        }

        let total = self.sum(label_set_criteria);

        // Gauge sums are already `f64`; only the count cast can lose precision.
        #[allow(clippy::cast_precision_loss)]
        let average = total / sample_count as f64;

        average
    }
}

#[cfg(test)]
mod tests {

    use torrust_tracker_primitives::DurationSinceUnixEpoch;

    use crate::counter::Counter;
    use crate::gauge::Gauge;
    use crate::label::LabelSet;
    use crate::metric::aggregate::avg::Avg;
    use crate::metric::{Metric, MetricName};
    use crate::metric_name;
    use crate::sample::Sample;
    use crate::sample_collection::SampleCollection;

    // Test-only builder for assembling a `Metric<T>` from individual samples.
    // All samples share one fixed timestamp and the metric name "test_metric".
    struct MetricBuilder<T> {
        sample_time: DurationSinceUnixEpoch, // timestamp applied to every sample
        name: MetricName,                    // metric name used for the built metric
        samples: Vec<Sample<T>>,             // samples accumulated via `with_sample`
    }

    impl<T> Default for MetricBuilder<T> {
        fn default() -> Self {
            Self {
                // Arbitrary fixed epoch timestamp; tests never inspect it.
                sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000),
                name: metric_name!("test_metric"),
                samples: vec![],
            }
        }
    }

    impl<T> MetricBuilder<T> {
        // Appends one sample with the given value and label set.
        fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self {
            let sample = Sample::new(value, self.sample_time, label_set.clone());
            self.samples.push(sample);
            self
        }

        // Builds the metric; panics if the accumulated samples are invalid
        // (e.g. rejected by `SampleCollection::new`).
        fn build(self) -> Metric<T> {
            Metric::new(
                self.name,
                None,
                None,
                SampleCollection::new(self.samples).expect("invalid samples"),
            )
        }
    }

    // Table-driven cases for `Metric<Counter>::avg`.
    fn counter_cases() -> Vec<(Metric<Counter>, LabelSet, f64)> {
        // (metric, label set criteria, expected_average_value)
        vec![
            // Metric with one sample without label set
            (
                MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(),
                LabelSet::empty(),
                1.0,
            ),
            // Metric with one sample with a label set
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with two samples, different label sets, average all
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .with_sample(3.into(), &[("l2", "l2_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1 + 3) / 2 = 2.0
            ),
            // Metric with two samples, different label sets, average one
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .with_sample(2.into(), &[("l2", "l2_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with three samples, same label key, different label values, average by key
            (
                MetricBuilder::default()
                    .with_sample(2.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(4.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .with_sample(6.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                4.0, // (2 + 4 + 6) / 3 = 4.0
            ),
            // Metric with two samples, different label values, average by subkey
            (
                MetricBuilder::default()
                    .with_sample(5.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(7.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .build(),
                [("la", "la_value")].into(),
                5.0,
            ),
            // Edge: Metric with no samples at all
            (MetricBuilder::default().build(), LabelSet::empty(), 0.0),
            // Edge: Metric with samples but no matching labels
            (
                MetricBuilder::default()
                    .with_sample(5.into(), &[("foo", "bar")].into())
                    .build(),
                [("not", "present")].into(),
                0.0,
            ),
            // Edge: Metric with zero value
            (
                MetricBuilder::default()
                    .with_sample(0.into(), &[("l3", "l3_value")].into())
                    .build(),
                [("l3", "l3_value")].into(),
                0.0,
            ),
            // Edge: Metric with a very large value
            (
                MetricBuilder::default()
                    .with_sample((u64::MAX / 2).into(), &[("edge", "large1")].into())
                    .with_sample((u64::MAX / 2).into(), &[("edge", "large2")].into())
                    .build(),
                LabelSet::empty(),
                #[allow(clippy::cast_precision_loss)]
                (u64::MAX as f64 / 2.0), // Average of (max/2) and (max/2)
            ),
        ]
    }

    // Table-driven cases for `Metric<Gauge>::avg`. Mirrors `counter_cases`,
    // plus gauge-specific cases (negative and fractional values).
    fn gauge_cases() -> Vec<(Metric<Gauge>, LabelSet, f64)> {
        // (metric, label set criteria, expected_average_value)
        vec![
            // Metric with one sample without label set
            (
                MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(),
                LabelSet::empty(),
                1.0,
            ),
            // Metric with one sample with a label set
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with two samples, different label sets, average all
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .with_sample(3.0.into(), &[("l2", "l2_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1.0 + 3.0) / 2 = 2.0
            ),
            // Metric with two samples, different label sets, average one
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .with_sample(2.0.into(), &[("l2", "l2_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with three samples, same label key, different label values, average by key
            (
                MetricBuilder::default()
                    .with_sample(2.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(4.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .with_sample(6.0.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                4.0, // (2.0 + 4.0 + 6.0) / 3 = 4.0
            ),
            // Metric with two samples, different label values, average by subkey
            (
                MetricBuilder::default()
                    .with_sample(5.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(7.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .build(),
                [("la", "la_value")].into(),
                5.0,
            ),
            // Edge: Metric with no samples at all
            (MetricBuilder::default().build(), LabelSet::empty(), 0.0),
            // Edge: Metric with samples but no matching labels
            (
                MetricBuilder::default()
                    .with_sample(5.0.into(), &[("foo", "bar")].into())
                    .build(),
                [("not", "present")].into(),
                0.0,
            ),
            // Edge: Metric with zero value
            (
                MetricBuilder::default()
                    .with_sample(0.0.into(), &[("l3", "l3_value")].into())
                    .build(),
                [("l3", "l3_value")].into(),
                0.0,
            ),
            // Edge: Metric with negative values
            (
                MetricBuilder::default()
                    .with_sample((-2.0).into(), &[("l4", "l4_value")].into())
                    .with_sample(4.0.into(), &[("l5", "l5_value")].into())
                    .build(),
                LabelSet::empty(),
                1.0, // (-2.0 + 4.0) / 2 = 1.0
            ),
            // Edge: Metric with decimal values
            (
                MetricBuilder::default()
                    .with_sample(1.5.into(), &[("l6", "l6_value")].into())
                    .with_sample(2.5.into(), &[("l7", "l7_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1.5 + 2.5) / 2 = 2.0
            ),
        ]
    }

    #[test]
    fn test_counter_cases() {
        for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() {
            let avg = metric.avg(criteria);

            // Epsilon comparison: averages are computed in f64.
            assert!(
                (avg - expected_value).abs() <= f64::EPSILON,
                "at case {idx}, expected avg to be {expected_value}, got {avg}"
            );
        }
    }

    #[test]
    fn test_gauge_cases() {
        for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() {
            let avg = metric.avg(criteria);

            // Epsilon comparison: averages are computed in f64.
            assert!(
                (avg - expected_value).abs() <= f64::EPSILON,
                "at case {idx}, expected avg to be {expected_value}, got {avg}"
            );
        }
    }
}
1 change: 1 addition & 0 deletions packages/metrics/src/metric/aggregate/mod.rs
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
/// Average (arithmetic mean) aggregate function.
pub mod avg;
/// Sum aggregate function.
pub mod sum;
11 changes: 11 additions & 0 deletions packages/metrics/src/metric/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,17 @@ impl<T> Metric<T> {
pub fn is_empty(&self) -> bool {
self.sample_collection.is_empty()
}

#[must_use]
pub fn collect_matching_samples(
&self,
label_set_criteria: &LabelSet,
) -> Vec<(&crate::label::LabelSet, &crate::sample::Measurement<T>)> {
self.sample_collection
.iter()
.filter(|(label_set, _measurement)| label_set.matches(label_set_criteria))
.collect()
}
}

impl Metric<Counter> {
Expand Down
Loading