diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcher.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcher.java new file mode 100644 index 000000000..491756311 --- /dev/null +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcher.java @@ -0,0 +1,61 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.jmxscraper.assertions; + +import javax.annotation.Nullable; + +/** Matches a single data point attribute by name and, optionally, by value. */ +public class AttributeMatcher { + private final String attributeName; + @Nullable private final String attributeValue; + + /** + * Create instance used to match a data point attribute with any value. + * + * @param attributeName matched attribute name + */ + AttributeMatcher(String attributeName) { + this(attributeName, null); + } + + /** + * Create instance used to match a data point attribute with the same name and the same value. + * + * @param attributeName attribute name + * @param attributeValue attribute value + */ + AttributeMatcher(String attributeName, @Nullable String attributeValue) { + this.attributeName = attributeName; + this.attributeValue = attributeValue; + } + + /** + * Returns the name of the data point attribute that this AttributeMatcher validates. + * + * @return name of validated attribute + */ + public String getAttributeName() { + return attributeName; + } + + @Override + public String toString() { + return attributeValue == null + ? '{' + attributeName + '}' + : '{' + attributeName + '=' + attributeValue + '}'; + } + + /** + * Verifies whether this matcher matches the provided attribute value. A matcher holding a null + * value matches any attribute value. + * + * @param value a value to be matched + * @return true if this matcher matches the provided value, false otherwise.
+ */ + boolean matchesValue(String value) { + return attributeValue == null || attributeValue.equals(value); + } +} diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcherGroup.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcherGroup.java new file mode 100644 index 000000000..df87d739d --- /dev/null +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/AttributeMatcherGroup.java @@ -0,0 +1,60 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.jmxscraper.assertions; + +import java.util.Collection; +import java.util.Map; +import java.util.stream.Collectors; + +/** Group of attribute matchers. */ +public class AttributeMatcherGroup { + + // stored as a Map for easy lookup by name + private final Map<String, AttributeMatcher> matchers; + + /** + * Creates a group from a collection of attribute matchers. + * + * @param matchers collection of matchers to build a group from + * @throws IllegalStateException if two matchers share the same attribute name + */ + AttributeMatcherGroup(Collection<AttributeMatcher> matchers) { + this.matchers = + matchers.stream().collect(Collectors.toMap(AttributeMatcher::getAttributeName, m -> m)); + } + + /** + * Checks if the provided attributes match this attribute matcher group. + * + * @param attributes attributes to check as a map + * @return {@literal true} when the attributes match all matchers from this group + */ + public boolean matches(Map<String, String> attributes) { + if (attributes.size() != matchers.size()) { + return false; + } + + for (Map.Entry<String, String> entry : attributes.entrySet()) { + AttributeMatcher matcher = matchers.get(entry.getKey()); + if (matcher == null) { + // no matcher for this key: unexpected key + return false; + } + + if (!matcher.matchesValue(entry.getValue())) { + // value does not match: unexpected value + return false; + } + } + + return true; + } + + @Override + public String toString() { + return matchers.values().toString(); + } +} diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/DataPointAttributes.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/DataPointAttributes.java new file mode 100644 index 000000000..4545c4f34 --- /dev/null +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/DataPointAttributes.java @@ -0,0 +1,53 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.contrib.jmxscraper.assertions; + +import java.util.Arrays; + +/** + * Utility class providing convenience static methods to construct data point attribute matchers + * and groups of matchers. + */ +public class DataPointAttributes { + private DataPointAttributes() {} + + /** + * Creates a matcher checking that a data point attribute with the given name has a value + * identical to the one provided as a parameter (exact match). + * + * @param name name of the data point attribute to check + * @param value expected value of the checked data point attribute + * @return instance of matcher + */ + public static AttributeMatcher attribute(String name, String value) { + return new AttributeMatcher(name, value); + } + + /** + * Creates a matcher checking that a data point attribute with the given name + * exists. Any value of the attribute is considered a match (any-value match).
+ * + * @param name name of the data point attribute to check + * @return instance of matcher + */ + public static AttributeMatcher attributeWithAnyValue(String name) { + return new AttributeMatcher(name); + } + + /** + * Creates a group of attribute matchers that should be used to verify data point attributes. + * + * @param attributes matchers to create the group from; attribute names must be unique + * @return group of attribute matchers + * @throws IllegalStateException if the provided list contains two or more matchers with the same + * attribute name + * @see MetricAssert#hasDataPointsWithAttributes(AttributeMatcherGroup...) for a detailed + * description of the algorithm used for matching + */ + public static AttributeMatcherGroup attributeGroup(AttributeMatcher... attributes) { + return new AttributeMatcherGroup(Arrays.asList(attributes)); + } +} diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/MetricAssert.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/MetricAssert.java index f44c57888..1a4eaace3 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/MetricAssert.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/assertions/MetricAssert.java @@ -5,13 +5,14 @@ package io.opentelemetry.contrib.jmxscraper.assertions; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeGroup; + import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.opentelemetry.proto.common.v1.KeyValue; import io.opentelemetry.proto.metrics.v1.Metric; import io.opentelemetry.proto.metrics.v1.NumberDataPoint; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -21,7 +22,6 @@ import org.assertj.core.api.AbstractAssert; import org.assertj.core.internal.Integers; import org.assertj.core.internal.Iterables; -import org.assertj.core.internal.Maps; import org.assertj.core.internal.Objects; public class MetricAssert extends AbstractAssert<MetricAssert, Metric> { @@ -29,7 +29,6 @@ public class MetricAssert extends AbstractAssert<MetricAssert, Metric> { private static final Objects objects = Objects.instance(); private static final Iterables iterables = Iterables.instance(); private static final Integers integers = Integers.instance(); - private static final Maps maps = Maps.instance(); private boolean strict; @@ -59,7 +58,7 @@ private void strictCheck( if (!strict) { return; } - String failMsgPrefix = expectedCheckStatus ? "duplicate" : "missing"; + String failMsgPrefix = expectedCheckStatus ? "Missing" : "Duplicate"; info.description( "%s assertion on %s for metric '%s'", failMsgPrefix, metricProperty, actual.getName()); objects.assertEqual(info, actualCheckStatus, expectedCheckStatus); @@ -122,8 +121,8 @@ private MetricAssert hasSum(boolean monotonic) { info.description("sum expected for metric '%s'", actual.getName()); objects.assertEqual(info, actual.hasSum(), true); - String prefix = monotonic ? "monotonic" : "non-monotonic"; - info.description(prefix + " sum expected for metric '%s'", actual.getName()); + String sumType = monotonic ? 
"monotonic" : "non-monotonic"; + info.description("sum for metric '%s' is expected to be %s", actual.getName(), sumType); objects.assertEqual(info, actual.getSum().getIsMonotonic(), monotonic); return this; } @@ -156,6 +155,11 @@ public MetricAssert isUpDownCounter() { return this; } + /** + * Verifies that there is no attribute in any of data points. + * + * @return this + */ @CanIgnoreReturnValue public MetricAssert hasDataPointsWithoutAttributes() { isNotNull(); @@ -195,6 +199,7 @@ private MetricAssert checkDataPoints(Consumer> listConsume return this; } + // TODO: To be removed and calls will be replaced with hasDataPointsWithAttributes() @CanIgnoreReturnValue public MetricAssert hasTypedDataPoints(Collection types) { return checkDataPoints( @@ -229,58 +234,43 @@ private void dataPointsCommonCheck(List dataPoints) { } /** - * Verifies that all data points have all the expected attributes + * Verifies that all metric data points have the same expected one attribute * - * @param attributes expected attributes + * @param expectedAttribute attribute matcher to validate data points attributes * @return this */ - @SafeVarargs @CanIgnoreReturnValue - public final MetricAssert hasDataPointsAttributes(Map.Entry... attributes) { - return checkDataPoints( - dataPoints -> { - dataPointsCommonCheck(dataPoints); - - Map attributesMap = new HashMap<>(); - for (Map.Entry attributeEntry : attributes) { - attributesMap.put(attributeEntry.getKey(), attributeEntry.getValue()); - } - for (NumberDataPoint dataPoint : dataPoints) { - Map dataPointAttributes = toMap(dataPoint.getAttributesList()); - - // all attributes must match - info.description( - "missing/unexpected data points attributes for metric '%s'", actual.getName()); - containsExactly(dataPointAttributes, attributes); - maps.assertContainsAllEntriesOf(info, dataPointAttributes, attributesMap); - } - }); + public final MetricAssert hasDataPointsWithOneAttribute(AttributeMatcher expectedAttribute) { + return hasDataPointsWithAttributes(attributeGroup(expectedAttribute)); } /** - * Verifies that all data points have their attributes match one of the attributes set and that - * all provided attributes sets matched at least once. + * Verifies that every data point attributes is matched exactly by one of the matcher groups + * provided. Also, each matcher group must match at least one data point attributes set. Data + * point attributes are matched by matcher group if each attribute is matched by one matcher and + * each matcher matches one attribute. In other words: number of attributes is the same as number + * of matchers and there is 1:1 matching between them. * - * @param attributeSets sets of attributes as maps + * @param matcherGroups array of attribute matcher groups * @return this */ - @SafeVarargs @CanIgnoreReturnValue - @SuppressWarnings("varargs") // required to avoid warning - public final MetricAssert hasDataPointsAttributes(Map... attributeSets) { + public final MetricAssert hasDataPointsWithAttributes(AttributeMatcherGroup... 
matcherGroups) { return checkDataPoints( dataPoints -> { dataPointsCommonCheck(dataPoints); - boolean[] matchedSets = new boolean[attributeSets.length]; + boolean[] matchedSets = new boolean[matcherGroups.length]; - // validate each datapoint attributes match exactly one of the provided attributes set + // validate that each data point's attributes match exactly one of the provided matcher groups for (NumberDataPoint dataPoint : dataPoints) { - Map<String, String> map = toMap(dataPoint.getAttributesList()); - + Map<String, String> dataPointAttributes = + dataPoint.getAttributesList().stream() + .collect( + Collectors.toMap(KeyValue::getKey, kv -> kv.getValue().getStringValue())); int matchCount = 0; - for (int i = 0; i < attributeSets.length; i++) { - if (mapEquals(map, attributeSets[i])) { + for (int i = 0; i < matcherGroups.length; i++) { + if (matcherGroups[i].matches(dataPointAttributes)) { matchedSets[i] = true; matchCount++; } @@ -288,7 +278,7 @@ public final MetricAssert hasDataPointsAttributes(Map<String, String>... attributeSets) { info.description( "data point attributes '%s' for metric '%s' must match exactly one of the attribute sets '%s'", - map, actual.getName(), Arrays.asList(attributeSets)); + dataPointAttributes, actual.getName(), Arrays.asList(matcherGroups)); integers.assertEqual(info, matchCount, 1); } @@ -296,35 +286,9 @@ public final MetricAssert hasDataPointsAttributes(Map<String, String>... attributeSets) { for (int i = 0; i < matchedSets.length; i++) { info.description( "no data point matched attribute set '%s' for metric '%s'", - attributeSets[i], actual.getName()); + matcherGroups[i], actual.getName()); objects.assertEqual(info, matchedSets[i], true); } }); } - - /** - * Map equality utility - * - * @param m1 first map - * @param m2 second map - * @return true if the maps have exactly the same keys and values - */ - private static boolean mapEquals(Map<String, String> m1, Map<String, String> m2) { - if (m1.size() != m2.size()) { - return false; - } - return m1.entrySet().stream().allMatch(e -> e.getValue().equals(m2.get(e.getKey()))); - } - - @SafeVarargs - @SuppressWarnings("varargs") // required to avoid warning - private final void containsExactly( - Map<String, String> map, Map.Entry<String, String>... 
entries) { - maps.assertContainsExactly(info, map, entries); - } - - private static Map<String, String> toMap(List<KeyValue> list) { - return list.stream() - .collect(Collectors.toMap(KeyValue::getKey, kv -> kv.getValue().getStringValue())); - } } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java index a5c5522bf..4f909fdc5 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/ActiveMqIntegrationTest.java @@ -5,7 +5,8 @@ package io.opentelemetry.contrib.jmxscraper.target_systems; -import static org.assertj.core.api.Assertions.entry; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attribute; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeGroup; import io.opentelemetry.contrib.jmxscraper.JmxScraperContainer; import java.nio.file.Path; @@ -46,9 +47,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The number of consumers currently reading from the broker.") .hasUnit("{consumer}") .isUpDownCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.producer.count", metric -> @@ -56,9 +58,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The number of producers currently attached to the broker.") .hasUnit("{producer}") .isUpDownCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.connection.count", metric -> @@ -66,7 +69,7 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The total number of current connections.") .hasUnit("{connection}") .isUpDownCounter() - .hasDataPointsAttributes(entry("broker", "localhost"))) + .hasDataPointsWithOneAttribute(attribute("broker", "localhost"))) .add( "activemq.memory.usage", metric -> @@ -74,9 +77,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The percentage of configured memory used.") .hasUnit("%") .isGauge() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.disk.store_usage", metric -> @@ -85,7 +89,7 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription( "The percentage of configured disk used for persistent messages.") .hasUnit("%") .isGauge() - .hasDataPointsAttributes(entry("broker", "localhost"))) + .hasDataPointsWithOneAttribute(attribute("broker", "localhost"))) .add( "activemq.disk.temp_usage", metric -> @@ -94,7 +98,7 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription( "The percentage of configured disk used for non-persistent messages.") .hasUnit("%") .isGauge() - .hasDataPointsAttributes(entry("broker", "localhost"))) + 
.hasDataPointsWithOneAttribute(attribute("broker", "localhost"))) .add( "activemq.message.current", metric -> @@ -102,9 +106,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The current number of messages waiting to be consumed.") .hasUnit("{message}") .isUpDownCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.message.expired", metric -> @@ -113,9 +118,10 @@ protected MetricsVerifier createMetricsVerifier() { "The total number of messages not delivered because they expired.") .hasUnit("{message}") .isCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.message.enqueued", metric -> @@ -123,9 +129,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The total number of messages received by the broker.") .hasUnit("{message}") .isCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.message.dequeued", metric -> @@ -133,9 +140,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The total number of messages delivered to consumers.") .hasUnit("{message}") .isCounter() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))) + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))) .add( "activemq.message.wait_time.avg", metric -> @@ -143,8 +151,9 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("The average time a message was held on a destination.") .hasUnit("ms") .isGauge() - .hasDataPointsAttributes( - entry("destination", "ActiveMQ.Advisory.MasterBroker"), - entry("broker", "localhost"))); + .hasDataPointsWithAttributes( + attributeGroup( + attribute("destination", "ActiveMQ.Advisory.MasterBroker"), + attribute("broker", "localhost")))); } } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java index a16b4a6a0..7730f34a7 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/CassandraIntegrationTest.java @@ -5,11 +5,13 @@ package io.opentelemetry.contrib.jmxscraper.target_systems; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attribute; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeGroup; + import io.opentelemetry.contrib.jmxscraper.JmxScraperContainer; +import io.opentelemetry.contrib.jmxscraper.assertions.AttributeMatcherGroup; import java.nio.file.Path; import java.time.Duration; -import 
java.util.HashMap; -import java.util.Map; import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; @@ -158,10 +160,10 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("Number of requests by operation") .hasUnit("1") .isCounter() - .hasDataPointsAttributes( - requestCountAttributes("RangeSlice"), - requestCountAttributes("Read"), - requestCountAttributes("Write"))) + .hasDataPointsWithAttributes( + attributeGroup(attribute("operation", "RangeSlice")), + attributeGroup(attribute("operation", "Read")), + attributeGroup(attribute("operation", "Write")))) .add( "cassandra.client.request.error.count", metric -> @@ -169,7 +171,7 @@ protected MetricsVerifier createMetricsVerifier() { .hasDescription("Number of request errors by operation") .hasUnit("1") .isCounter() - .hasDataPointsAttributes( + .hasDataPointsWithAttributes( errorCountAttributes("RangeSlice", "Timeout"), errorCountAttributes("RangeSlice", "Failure"), errorCountAttributes("RangeSlice", "Unavailable"), errorCountAttributes("Read", "Timeout"), errorCountAttributes("Read", "Failure"), errorCountAttributes("Read", "Unavailable"), errorCountAttributes("Write", "Timeout"), errorCountAttributes("Write", "Failure"), errorCountAttributes("Write", "Unavailable"))); } - private static Map<String, String> errorCountAttributes(String operation, String status) { - Map<String, String> map = new HashMap<>(); - map.put("operation", operation); - map.put("status", status); - return map; - } - - private static Map<String, String> requestCountAttributes(String operation) { - Map<String, String> map = new HashMap<>(); - map.put("operation", operation); - return map; + private static AttributeMatcherGroup errorCountAttributes(String operation, String status) { + return attributeGroup(attribute("operation", operation), attribute("status", status)); } } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java index 93877e279..7aee15654 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/HBaseIntegrationTest.java @@ -5,11 +5,9 @@ package io.opentelemetry.contrib.jmxscraper.target_systems; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertGauge; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertGaugeWithAttributes; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertSum; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertSumWithAttributes; -import static org.assertj.core.data.MapEntry.entry; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attribute; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeGroup; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeWithAnyValue; import io.opentelemetry.contrib.jmxscraper.JmxScraperContainer; import java.nio.file.Path; @@ -36,404 +34,491 @@ protected JmxScraperContainer customizeScraperContainer( } @Override - protected void verifyMetrics() { - waitAndAssertMetrics( - metric -> - assertSumWithAttributes( - metric, - "hbase.master.region_server.count", - "The number of region servers.", - "{server}", - /* isMonotonic= */ false, - attrs -> attrs.contains(entry("state", "dead")), - attrs -> attrs.contains(entry("state", "live"))), - 
metric -> - assertSum( - metric, - "hbase.master.regions_in_transition.count", - "The number of regions that are in transition.", - "{region}", - /* isMonotonic= */ false), - metric -> - assertSum( - metric, - "hbase.master.regions_in_transition.over_threshold", - "The number of regions that have been in transition longer than a threshold time.", - "{region}", - /* isMonotonic= */ false), - metric -> - assertGauge( - metric, - "hbase.master.regions_in_transition.oldest_age", - "The age of the longest region in transition.", - "ms"), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.region.count", - "The number of regions hosted by the region server.", - "{region}", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.disk.store_file.count", - "The number of store files on disk currently managed by the region server.", - "{file}", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.disk.store_file.size", - "Aggregate size of the store files on disk.", - "By", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.write_ahead_log.count", - "The number of write ahead logs not yet archived.", - "{log}", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.request.count", - "The number of requests received.", - "{request}", - /* isMonotonic= */ false, - attrs -> { - attrs.contains(entry("state", "write")); - attrs.containsKey("region_server"); - }, - attrs -> { - attrs.contains(entry("state", "read")); - attrs.containsKey("region_server"); - }), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.queue.length", - "The number of RPC handlers actively servicing requests.", - "{handler}", - /* isMonotonic= */ false, - attrs -> { - attrs.contains(entry("state", "flush")); - attrs.containsKey("region_server"); - }, - attrs -> { - attrs.contains(entry("state", "compaction")); - attrs.containsKey("region_server"); - }), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.blocked_update.time", - "Amount of time updates have been blocked so the memstore can be flushed.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.block_cache.operation.count", - "Number of block cache hits/misses.", - "{operation}", - attrs -> { - attrs.contains(entry("state", "miss")); - attrs.containsKey("region_server"); - }, - attrs -> { - attrs.contains(entry("state", "hit")); - attrs.containsKey("region_server"); - }), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.files.local", - "Percent of store file data that can be read from the local.", - "%", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.append.latency.p99", - "Append operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.append.latency.max", - "Append operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - 
"hbase.region_server.operation.append.latency.min", - "Append operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.append.latency.mean", - "Append operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.append.latency.median", - "Append operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.delete.latency.p99", - "Delete operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.delete.latency.max", - "Delete operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.delete.latency.min", - "Delete operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.delete.latency.mean", - "Delete operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.delete.latency.median", - "Delete operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.put.latency.p99", - "Put operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.put.latency.max", - "Put operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.put.latency.min", - "Put operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.put.latency.mean", - "Put operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.put.latency.median", - "Put operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.get.latency.p99", - "Get operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.get.latency.max", - "Get operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.get.latency.min", - "Get operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.get.latency.mean", - "Get operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.get.latency.median", - "Get operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - 
"hbase.region_server.operation.replay.latency.p99", - "Replay operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.replay.latency.max", - "Replay operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.replay.latency.min", - "Replay operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.replay.latency.mean", - "Replay operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.replay.latency.median", - "Replay operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.increment.latency.p99", - "Increment operation 99th Percentile latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.increment.latency.max", - "Increment operation max latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.increment.latency.min", - "Increment operation minimum latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.increment.latency.mean", - "Increment operation mean latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertGaugeWithAttributes( - metric, - "hbase.region_server.operation.increment.latency.median", - "Increment operation median latency.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.operations.slow", - "Number of operations that took over 1000ms to complete.", - "{operation}", - /* isMonotonic= */ false, - attrs -> attrs.contains(entry("operation", "delete")), - attrs -> attrs.contains(entry("operation", "append")), - attrs -> attrs.contains(entry("operation", "get")), - attrs -> attrs.contains(entry("operation", "put")), - attrs -> attrs.contains(entry("operation", "increment"))), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.open_connection.count", - "The number of open connections at the RPC layer.", - "{connection}", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.active_handler.count", - "The number of RPC handlers actively servicing requests.", - "{handler}", - /* isMonotonic= */ false, - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.queue.request.count", - "The number of currently enqueued requests.", - "{request}", - /* isMonotonic= */ false, - attrs -> attrs.contains(entry("state", "replication")), - attrs -> attrs.contains(entry("state", "user")), - attrs -> attrs.contains(entry("state", "priority"))), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.authentication.count", - "Number of client connection authentication failures/successes.", - "{authentication request}", - /* isMonotonic= */ false, - attrs -> 
attrs.contains(entry("state", "successes")), - attrs -> attrs.contains(entry("state", "failures"))), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.gc.time", - "Time spent in garbage collection.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.gc.young_gen.time", - "Time spent in garbage collection of the young generation.", - "ms", - attrs -> attrs.containsKey("region_server")), - metric -> - assertSumWithAttributes( - metric, - "hbase.region_server.gc.old_gen.time", - "Time spent in garbage collection of the old generation.", - "ms", - attrs -> attrs.containsKey("region_server"))); + protected MetricsVerifier createMetricsVerifier() { + return MetricsVerifier.create() + .add( + "hbase.master.region_server.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of region servers.") + .hasUnit("{server}") + .hasDataPointsWithAttributes( + attributeGroup(attribute("state", "dead")), + attributeGroup(attribute("state", "live")))) + .add( + "hbase.master.regions_in_transition.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of region servers.") + .hasUnit("{server}") + .hasDataPointsWithoutAttributes()) + .add( + "hbase.master.regions_in_transition.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of regions that are in transition.") + .hasUnit("{region}") + .hasDataPointsWithoutAttributes()) + .add( + "hbase.master.regions_in_transition.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of regions that are in transition.") + .hasUnit("{region}") + .hasDataPointsWithoutAttributes()) + .add( + "hbase.master.regions_in_transition.over_threshold", + metric -> + metric + .isUpDownCounter() + .hasDescription( + "The number of regions that have been in transition longer than a threshold time.") + .hasUnit("{region}") + .hasDataPointsWithoutAttributes()) + .add( + "hbase.master.regions_in_transition.oldest_age", + metric -> + metric + .isGauge() + .hasDescription("The age of the longest region in transition.") + .hasUnit("ms") + .hasDataPointsWithoutAttributes()) + .add( + "hbase.region_server.region.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of regions hosted by the region server.") + .hasUnit("{region}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.disk.store_file.count", + metric -> + metric + .isUpDownCounter() + .hasDescription( + "The number of store files on disk currently managed by the region server.") + .hasUnit("{file}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.disk.store_file.size", + metric -> + metric + .isUpDownCounter() + .hasDescription("Aggregate size of the store files on disk.") + .hasUnit("By") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.write_ahead_log.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of write ahead logs not yet archived.") + .hasUnit("{log}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.request.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of requests received.") + .hasUnit("{request}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("state", "write"), 
attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "read"), attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.queue.length", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of RPC handlers actively servicing requests.") + .hasUnit("{handler}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("state", "flush"), attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "compaction"), + attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.blocked_update.time", + metric -> + metric + .isGauge() + .hasDescription( + "Amount of time updates have been blocked so the memstore can be flushed.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.block_cache.operation.count", + metric -> + metric + .isGauge() + .hasDescription("Number of block cache hits/misses.") + .hasUnit("{operation}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("state", "miss"), attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "hit"), attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.files.local", + metric -> + metric + .isGauge() + .hasDescription("Percent of store file data that can be read from the local.") + .hasUnit("%") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: append --------------------------------------------------------------------- + .add( + "hbase.region_server.operation.append.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Append operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.append.latency.max", + metric -> + metric + .isGauge() + .hasDescription("Append operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.append.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Append operation minimum latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.append.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Append operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.append.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Append operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: delete --------------------------------------------------------------------- + .add( + "hbase.region_server.operation.delete.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Delete operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.delete.latency.max", + metric -> + metric + .isGauge() + .hasDescription("Delete operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.delete.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Delete operation minimum latency.") + .hasUnit("ms") + 
.hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.delete.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Delete operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.delete.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Delete operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: put --------------------------------------------------------------------- + .add( + "hbase.region_server.operation.put.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Put operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.put.latency.max", + metric -> + metric + .isGauge() + .hasDescription("Put operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.put.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Put operation minimum latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.put.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Put operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.put.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Put operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: get --------------------------------------------------------------------- + .add( + "hbase.region_server.operation.get.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Get operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.get.latency.max", + metric -> + metric + .isGauge() + .hasDescription("Get operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.get.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Get operation minimum latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.get.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Get operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.get.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Get operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: replay --------------------------------------------------------------------- + .add( + "hbase.region_server.operation.replay.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Replay operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.replay.latency.max", + metric -> + metric + .isGauge() + 
.hasDescription("Replay operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.replay.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Replay operation minimum latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.replay.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Replay operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.replay.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Replay operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + + // Operation: increment ------------------------------------------------------------------- + .add( + "hbase.region_server.operation.increment.latency.p99", + metric -> + metric + .isGauge() + .hasDescription("Increment operation 99th Percentile latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.increment.latency.max", + metric -> + metric + .isGauge() + .hasDescription("Increment operation max latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.increment.latency.min", + metric -> + metric + .isGauge() + .hasDescription("Increment operation minimum latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.increment.latency.mean", + metric -> + metric + .isGauge() + .hasDescription("Increment operation mean latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.operation.increment.latency.median", + metric -> + metric + .isGauge() + .hasDescription("Increment operation median latency.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + // ----------------------------------------------------------------------------------- + + .add( + "hbase.region_server.operations.slow", + metric -> + metric + .isUpDownCounter() + .hasDescription("Number of operations that took over 1000ms to complete.") + .hasUnit("{operation}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("operation", "delete"), + attributeWithAnyValue("region_server")), + attributeGroup( + attribute("operation", "append"), + attributeWithAnyValue("region_server")), + attributeGroup( + attribute("operation", "get"), attributeWithAnyValue("region_server")), + attributeGroup( + attribute("operation", "put"), attributeWithAnyValue("region_server")), + attributeGroup( + attribute("operation", "increment"), + attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.open_connection.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of open connections at the RPC layer.") + .hasUnit("{connection}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.active_handler.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of RPC handlers actively servicing requests.") + .hasUnit("{handler}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + 
"hbase.region_server.queue.request.count", + metric -> + metric + .isUpDownCounter() + .hasDescription("The number of currently enqueued requests.") + .hasUnit("{request}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("state", "replication"), + attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "user"), attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "priority"), + attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.authentication.count", + metric -> + metric + .isUpDownCounter() + .hasDescription( + "Number of client connection authentication failures/successes.") + .hasUnit("{authentication request}") + .hasDataPointsWithAttributes( + attributeGroup( + attribute("state", "successes"), + attributeWithAnyValue("region_server")), + attributeGroup( + attribute("state", "failures"), + attributeWithAnyValue("region_server")))) + .add( + "hbase.region_server.gc.time", + metric -> + metric + .isCounter() + .hasDescription("Time spent in garbage collection.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.gc.young_gen.time", + metric -> + metric + .isCounter() + .hasDescription("Time spent in garbage collection of the young generation.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))) + .add( + "hbase.region_server.gc.old_gen.time", + metric -> + metric + .isCounter() + .hasDescription("Time spent in garbage collection of the old generation.") + .hasUnit("ms") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("region_server"))); } } diff --git a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java index fb1f489b1..86097cbaa 100644 --- a/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java +++ b/jmx-scraper/src/integrationTest/java/io/opentelemetry/contrib/jmxscraper/target_systems/JettyIntegrationTest.java @@ -5,10 +5,9 @@ package io.opentelemetry.contrib.jmxscraper.target_systems; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertGauge; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertGaugeWithAttributes; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertSumWithAttributes; -import static io.opentelemetry.contrib.jmxscraper.target_systems.MetricAssertions.assertSumWithAttributesMultiplePoints; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attribute; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeGroup; +import static io.opentelemetry.contrib.jmxscraper.assertions.DataPointAttributes.attributeWithAnyValue; import io.opentelemetry.contrib.jmxscraper.JmxScraperContainer; import java.nio.file.Path; @@ -55,51 +54,60 @@ protected JmxScraperContainer customizeScraperContainer( } @Override - protected void verifyMetrics() { - waitAndAssertMetrics( - metric -> - assertSumWithAttributes( - metric, - "jetty.session.count", - "The number of sessions established in total.", - "{session}", - attrs -> attrs.containsKey("resource")), - metric -> - assertSumWithAttributes( - metric, - "jetty.session.time.total", - "The total time sessions have been active.", - "s", - 
attrs -> attrs.containsKey("resource")), - metric -> - assertGaugeWithAttributes( - metric, - "jetty.session.time.max", - "The maximum amount of time a session has been active.", - "s", - attrs -> attrs.containsKey("resource")), - metric -> - assertSumWithAttributesMultiplePoints( - metric, - "jetty.select.count", - "The number of select calls.", - "{operation}", - /* isMonotonic= */ true, - // minor divergence from jetty.groovy with extra metrics attributes - attrs -> attrs.containsKey("context").containsKey("id")), - metric -> - assertGaugeWithAttributes( - metric, - "jetty.thread.count", - "The current number of threads.", - "{thread}", - attrs -> attrs.containsEntry("state", "busy"), - attrs -> attrs.containsEntry("state", "idle")), - metric -> - assertGauge( - metric, - "jetty.thread.queue.count", - "The current number of threads in the queue.", - "{thread}")); + protected MetricsVerifier createMetricsVerifier() { + return MetricsVerifier.create() + .add( + "jetty.session.count", + metric -> + metric + .isCounter() + .hasDescription("The number of sessions established in total.") + .hasUnit("{session}") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("resource"))) + .add( + "jetty.session.time.total", + metric -> + metric + .isCounter() + .hasDescription("The total time sessions have been active.") + .hasUnit("s") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("resource"))) + .add( + "jetty.session.time.max", + metric -> + metric + .isGauge() + .hasDescription("The maximum amount of time a session has been active.") + .hasUnit("s") + .hasDataPointsWithOneAttribute(attributeWithAnyValue("resource"))) + .add( + "jetty.select.count", + metric -> + metric + .isCounter() + .hasDescription("The number of select calls.") + .hasUnit("{operation}") + .hasDataPointsWithAttributes( + attributeGroup( + attributeWithAnyValue("context"), attributeWithAnyValue("id")))) + .add( + "jetty.thread.count", + metric -> + metric + .isGauge() + .hasDescription("The current number of threads.") + .hasUnit("{thread}") + .hasDataPointsWithAttributes( + attributeGroup(attribute("state", "busy")), + attributeGroup(attribute("state", "idle")))) + .add( + "jetty.thread.queue.count", + metric -> + metric + .isGauge() + .hasDescription("The current number of threads in the queue.") + .hasUnit("{thread}") + .hasDataPointsWithoutAttributes() // Got rid of id (see jetty.yaml) + ); } }