Skip to content

Commit 456fcf1

Browse files
authored
Merge branch 'main' into cijothomas/metric-export-remove-mut
2 parents ad83b13 + 1d610a2 commit 456fcf1

File tree

7 files changed

+107
-25
lines changed

7 files changed

+107
-25
lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ jobs:
102102
run: |
103103
cargo install cargo-check-external-types@0.1.13  # NOTE(review): "[email protected]" in the capture is scraper email-redaction of `cargo-check-external-types@<pinned version>`; confirm the exact pin against the repo's ci.yml
104104
cd ${{ matrix.example }}
105-
cargo check-external-types --config allowed-external-types.toml
105+
cargo check-external-types --all-features --config allowed-external-types.toml
106106
msrv:
107107
strategy:
108108
matrix:

opentelemetry-otlp/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
## vNext
44

55
- Update `tonic` dependency version to 0.13
6+
- Re-export `tonic` types under `tonic_types`
7+
[2898](https://github.com/open-telemetry/opentelemetry-rust/pull/2898)
68

79
## 0.29.0
810

opentelemetry-otlp/allowed-external-types.toml

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,20 +3,16 @@
33
# This is used with cargo-check-external-types to reduce the surface area of downstream crates from
44
# the public API. Ideally this can have as few exceptions as possible.
55
allowed_external_types = [
6-
"opentelemetry::*",
76
"opentelemetry_http::*",
87
"opentelemetry_sdk::*",
9-
# http is a pre 1.0 crate
10-
"http::uri::InvalidUri",
11-
"http::header::name::InvalidHeaderName",
12-
"http::header::value::InvalidHeaderValue",
13-
# prost is a pre 1.0 crate
14-
"prost::error::EncodeError",
8+
# serde
9+
"serde::de::Deserialize",
10+
"serde::ser::Serialize",
1511
# tonic is a pre 1.0 crate
16-
"tonic::status::Code",
17-
"tonic::status::Status",
1812
"tonic::metadata::map::MetadataMap",
13+
"tonic::transport::channel::tls::ClientTlsConfig",
14+
"tonic::transport::tls::Certificate",
15+
"tonic::transport::tls::Identity",
1916
"tonic::transport::channel::Channel",
20-
"tonic::transport::error::Error",
2117
"tonic::service::interceptor::Interceptor",
2218
]

opentelemetry-otlp/src/lib.rs

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -450,3 +450,20 @@ pub enum Protocol {
450450
#[doc(hidden)]
451451
/// Placeholder type when no exporter pipeline has been configured in telemetry pipeline.
452452
pub struct NoExporterConfig(());
453+
454+
/// Re-exported types from the `tonic` crate.
455+
#[cfg(feature = "grpc-tonic")]
456+
pub mod tonic_types {
457+
/// Re-exported types from `tonic::metadata`.
458+
pub mod metadata {
459+
#[doc(no_inline)]
460+
pub use tonic::metadata::MetadataMap;
461+
}
462+
463+
/// Re-exported types from `tonic::transport`.
464+
#[cfg(feature = "tls")]
465+
pub mod transport {
466+
#[doc(no_inline)]
467+
pub use tonic::transport::{Certificate, ClientTlsConfig, Identity};
468+
}
469+
}

opentelemetry-sdk/src/metrics/internal/histogram.rs

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,10 @@ where
2727

2828
buckets.total += value;
2929
buckets.count += 1;
30-
buckets.counts[index] += 1;
30+
if !buckets.counts.is_empty() {
31+
buckets.counts[index] += 1;
32+
}
33+
3134
if value < buckets.min {
3235
buckets.min = value;
3336
}
@@ -96,7 +99,12 @@ impl<T: Number> Histogram<T> {
9699
bounds.sort_by(|a, b| a.partial_cmp(b).expect("NaNs filtered out"));
97100
}
98101

99-
let buckets_count = bounds.len() + 1;
102+
let buckets_count = if bounds.is_empty() {
103+
0
104+
} else {
105+
bounds.len() + 1
106+
};
107+
100108
Histogram {
101109
value_map: ValueMap::new(buckets_count, cardinality_limit),
102110
init_time: AggregateTimeInitiator::default(),

opentelemetry-sdk/src/metrics/mod.rs

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -440,6 +440,14 @@ mod tests {
440440
histogram_aggregation_with_custom_bounds_helper(Temporality::Cumulative);
441441
}
442442

443+
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
444+
async fn histogram_aggregation_with_empty_bounds() {
445+
// Run this test with stdout enabled to see output.
446+
// cargo test histogram_aggregation_with_empty_bounds --features=testing -- --nocapture
447+
histogram_aggregation_with_empty_bounds_helper(Temporality::Delta);
448+
histogram_aggregation_with_empty_bounds_helper(Temporality::Cumulative);
449+
}
450+
443451
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
444452
async fn updown_counter_aggregation_cumulative() {
445453
// Run this test with stdout enabled to see output.
@@ -2161,6 +2169,55 @@ mod tests {
21612169
assert_eq!(vec![1.0, 2.5, 5.5], data_point.bounds);
21622170
assert_eq!(vec![1, 1, 3, 0], data_point.bucket_counts);
21632171
}
2172+
2173+
fn histogram_aggregation_with_empty_bounds_helper(temporality: Temporality) {
2174+
let mut test_context = TestContext::new(temporality);
2175+
let histogram = test_context
2176+
.meter()
2177+
.u64_histogram("test_histogram")
2178+
.with_boundaries(vec![])
2179+
.build();
2180+
histogram.record(1, &[KeyValue::new("key1", "value1")]);
2181+
histogram.record(2, &[KeyValue::new("key1", "value1")]);
2182+
histogram.record(3, &[KeyValue::new("key1", "value1")]);
2183+
histogram.record(4, &[KeyValue::new("key1", "value1")]);
2184+
histogram.record(5, &[KeyValue::new("key1", "value1")]);
2185+
2186+
test_context.flush_metrics();
2187+
2188+
// Assert
2189+
let MetricData::Histogram(histogram_data) =
2190+
test_context.get_aggregation::<u64>("test_histogram", None)
2191+
else {
2192+
unreachable!()
2193+
};
2194+
// Expecting 1 time-series.
2195+
assert_eq!(histogram_data.data_points.len(), 1);
2196+
if let Temporality::Cumulative = temporality {
2197+
assert_eq!(
2198+
histogram_data.temporality,
2199+
Temporality::Cumulative,
2200+
"Should produce cumulative"
2201+
);
2202+
} else {
2203+
assert_eq!(
2204+
histogram_data.temporality,
2205+
Temporality::Delta,
2206+
"Should produce delta"
2207+
);
2208+
}
2209+
2210+
// find and validate key1=value1 datapoint
2211+
let data_point =
2212+
find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value1")
2213+
.expect("datapoint with key1=value1 expected");
2214+
2215+
assert_eq!(data_point.count, 5);
2216+
assert_eq!(data_point.sum, 15);
2217+
assert!(data_point.bounds.is_empty());
2218+
assert!(data_point.bucket_counts.is_empty());
2219+
}
2220+
21642221
fn gauge_aggregation_helper(temporality: Temporality) {
21652222
// Arrange
21662223
let mut test_context = TestContext::new(temporality);

opentelemetry-stdout/src/metrics/exporter.rs

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -235,19 +235,21 @@ fn print_hist_data_points<T: Debug>(data_points: &[HistogramDataPoint<T>]) {
235235
println!("\t\t\t\t -> {}: {}", kv.key, kv.value.as_str());
236236
}
237237

238-
println!("\t\t\tBuckets");
239-
let mut lower_bound = f64::NEG_INFINITY;
240-
for (i, &upper_bound) in data_point.bounds.iter().enumerate() {
241-
let count = data_point.bucket_counts.get(i).unwrap_or(&0);
242-
println!("\t\t\t\t {} to {} : {}", lower_bound, upper_bound, count);
243-
lower_bound = upper_bound;
244-
}
238+
if !data_point.bucket_counts.is_empty() {
239+
println!("\t\t\tBuckets");
240+
let mut lower_bound = f64::NEG_INFINITY;
241+
for (i, &upper_bound) in data_point.bounds.iter().enumerate() {
242+
let count = data_point.bucket_counts.get(i).unwrap_or(&0);
243+
println!("\t\t\t\t {} to {} : {}", lower_bound, upper_bound, count);
244+
lower_bound = upper_bound;
245+
}
245246

246-
let last_count = data_point
247-
.bucket_counts
248-
.get(data_point.bounds.len())
249-
.unwrap_or(&0);
250-
println!("\t\t\t\t{} to +Infinity : {}", lower_bound, last_count);
247+
let last_count = data_point
248+
.bucket_counts
249+
.get(data_point.bounds.len())
250+
.unwrap_or(&0);
251+
println!("\t\t\t\t{} to +Infinity : {}", lower_bound, last_count);
252+
}
251253
}
252254
}
253255

0 commit comments

Comments
 (0)