Skip to content

Commit a4e539c

Browse files
committed
add changelog, ST, unit test and documentation changes
Signed-off-by: Lukas Kral <lukywill16@gmail.com>
1 parent bbadea8 commit a4e539c

File tree

6 files changed

+69
-4
lines changed

6 files changed

+69
-4
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
* Allow upgrading from unknown Apache Kafka versions (that might be used in Strimzi patch releases)
99
* Add support for Kafka 4.1.2
1010
* Remove PreferredLeaderElectionGoal from Cruise Control's default.goals list
11+
* Enable dynamic configuration of the `allowList` option of the Strimzi Metrics Reporter
1112

1213
### Major changes, deprecations, and removals
1314

cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaConfigurationDiffTest.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -396,6 +396,15 @@ public void testPropertiesAllScopesChanged() {
396396
assertThat(kcd.getConfigDiff(Scope.READ_ONLY).size(), is(1));
397397
}
398398

399+
@Test
400+
public void testPrometheusMetricsReporterAllowList() {
401+
List<ConfigEntry> ces = singletonList(new ConfigEntry("prometheus.metrics.reporter.allowlist", "kafka_controller.*"));
402+
KafkaConfigurationDiff kcd = new KafkaConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces),
403+
getDesiredConfiguration(ces), kafkaVersion, brokerNodeRef, false, true);
404+
assertThat(kcd.getDiffSize(), is(0));
405+
assertThat(kcd.canBeUpdatedDynamically(), is(true));
406+
}
407+
399408
@Test
400409
public void testAreDoublesEqual() {
401410
assertThat(KafkaConfigurationDiff.areDoublesEqual("test.option", Map.of(), Map.of("test.option", "0.8")), is(false));

config-model-generator/src/main/java/io/strimzi/build/kafka/metadata/KafkaConfigModelGenerator.java

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ private KafkaConfigModelGenerator() {
5959
public static void main(String[] args) throws Exception {
6060
String version = kafkaVersion();
6161
Map<String, ConfigModel> configs = configs(version);
62-
addCustomPluginConfig(configs);
62+
addPrometheusMetricsReporterAllowListConfig(configs);
6363

6464
ObjectMapper mapper = JsonMapper.builder().enable(SerializationFeature.INDENT_OUTPUT).enable(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY).build();
6565
ConfigModels root = new ConfigModels();
@@ -135,7 +135,13 @@ private static Map<String, ConfigModel> configs(String version) throws NoSuchMet
135135
return result;
136136
}
137137

138-
private static void addCustomPluginConfig(Map<String, ConfigModel> configs) {
138+
/**
139+
* Adds `prometheus.metrics.reporter.allowlist` with its configuration to the ConfigModel,
140+
* so we are able to update it dynamically in Kafka brokers/controllers.
141+
*
142+
* @param configs config models of the Kafka version that should be updated with another config model
143+
*/
144+
private static void addPrometheusMetricsReporterAllowListConfig(Map<String, ConfigModel> configs) {
139145
String prometheusMetricAllowListName = "prometheus.metrics.reporter.allowlist";
140146
ConfigModel prometheusMetricConfigModel = new ConfigModel();
141147
prometheusMetricConfigModel.setScope(Scope.CLUSTER_WIDE);

documentation/modules/con-common-configuration-properties.adoc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -557,6 +557,7 @@ The Strimzi Metrics Reporter offers a lightweight solution for exposing Kafka me
557557

558558
To enable Strimzi Metrics Reporter, set the type to `strimziMetricsReporter`.
559559
The `allowList` configuration is a comma-separated list of regex patterns to filter the metrics that are collected. This defaults to `.*`, which allows all metrics.
560+
Changes to this field do not trigger a rolling update; the configuration is updated dynamically for Kafka brokers and controllers.
560561

561562
NOTE: Using `strimziMetricsReporter` is only supported in the Kafka brokers and controllers at the moment.
562563

systemtest/src/main/java/io/strimzi/systemtest/utils/specific/MetricsUtils.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -183,12 +183,12 @@ public static void assertContainsMetric(List<Metric> metrics, String metricName)
183183
assertThat(String.format("metric '%s' is not present in the list of metrics", metricName), containsMetric, is(true));
184184
}
185185

186-
private static List<Double> createPatternAndCollect(BaseMetricsCollector collector, String metric) {
186+
public static List<Double> createPatternAndCollect(BaseMetricsCollector collector, String metric) {
187187
Pattern pattern = Pattern.compile(metric + " ([\\d.^\\n]+)", Pattern.CASE_INSENSITIVE);
188188
return collector.waitForSpecificMetricAndCollect(pattern);
189189
}
190190

191-
private static List<Double> createPatternAndCollectWithoutWait(BaseMetricsCollector collector, String metric) {
191+
public static List<Double> createPatternAndCollectWithoutWait(BaseMetricsCollector collector, String metric) {
192192
Pattern pattern = Pattern.compile(metric + " ([\\d.^\\n]+)", Pattern.CASE_INSENSITIVE);
193193
return collector.collectSpecificMetric(pattern);
194194
}

systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,15 +11,18 @@
1111
import io.skodjob.annotations.TestDoc;
1212
import io.skodjob.kubetest4j.resources.KubeResourceManager;
1313
import io.strimzi.api.kafka.model.bridge.KafkaBridgeResources;
14+
import io.strimzi.api.kafka.model.common.metrics.StrimziMetricsReporter;
1415
import io.strimzi.api.kafka.model.kafka.KafkaResources;
1516
import io.strimzi.operator.common.Annotations;
1617
import io.strimzi.systemtest.AbstractST;
1718
import io.strimzi.systemtest.Environment;
1819
import io.strimzi.systemtest.TestConstants;
20+
import io.strimzi.systemtest.annotations.IsolatedTest;
1921
import io.strimzi.systemtest.annotations.ParallelTest;
2022
import io.strimzi.systemtest.docs.TestDocsLabels;
2123
import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients;
2224
import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder;
25+
import io.strimzi.systemtest.labels.LabelSelectors;
2326
import io.strimzi.systemtest.performance.gather.collectors.BaseMetricsCollector;
2427
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
2528
import io.strimzi.systemtest.storage.TestStorage;
@@ -31,12 +34,18 @@
3134
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
3235
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
3336
import io.strimzi.systemtest.templates.specific.ScraperTemplates;
37+
import io.strimzi.systemtest.utils.RollingUpdateUtils;
38+
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
3439
import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils;
40+
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
41+
import io.strimzi.systemtest.utils.specific.MetricsUtils;
3542
import org.apache.logging.log4j.LogManager;
3643
import org.apache.logging.log4j.Logger;
3744
import org.junit.jupiter.api.BeforeAll;
3845
import org.junit.jupiter.api.Tag;
3946

47+
import java.util.List;
48+
import java.util.Map;
4049
import java.util.concurrent.TimeUnit;
4150
import java.util.concurrent.locks.LockSupport;
4251

@@ -51,6 +60,8 @@
5160
import static io.strimzi.systemtest.utils.specific.MetricsUtils.assertMetricValue;
5261
import static io.strimzi.systemtest.utils.specific.MetricsUtils.assertMetricValueHigherThanOrEqualTo;
5362
import static io.strimzi.systemtest.utils.specific.MetricsUtils.assertMetricValueNotNull;
63+
import static org.hamcrest.CoreMatchers.is;
64+
import static org.hamcrest.MatcherAssert.assertThat;
5465
import static org.junit.jupiter.api.Assumptions.assumeFalse;
5566

5667
@Tag(SANITY)
@@ -271,6 +282,43 @@ void testKafkaBridgeMetrics() {
271282
assertMetricValueNotNull(bridgeCollector, "kafka_consumer_consumer_metrics_connection_count\\{.*}");
272283
}
273284

285+
@IsolatedTest
286+
void testDynamicReconfigurationAllowList() {
287+
Map<String, String> kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName()));
288+
289+
// change configuration to disable kafka_server.* metrics and add kafka_controller.* metrics
290+
KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> {
291+
StrimziMetricsReporter config = (StrimziMetricsReporter) kafka.getSpec().getKafka().getMetricsConfig();
292+
config.getValues().setAllowList(List.of("kafka_controller.*"));
293+
294+
kafka.getSpec().getKafka().setMetricsConfig(config);
295+
}
296+
);
297+
298+
RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName()), kafkaPods);
299+
300+
LOGGER.info("Check if Kafka Pods are missing the 'kafka_server_' metrics");
301+
kafkaCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT);
302+
303+
assertThat(MetricsUtils.createPatternAndCollectWithoutWait(kafkaCollector, "kafka_server_replicamanager_leadercount").isEmpty(), is(true));
304+
305+
LOGGER.info("Changing back to previous state");
306+
KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> {
307+
StrimziMetricsReporter config = (StrimziMetricsReporter) kafka.getSpec().getKafka().getMetricsConfig();
308+
config.getValues().setAllowList(List.of("kafka_server.*"));
309+
310+
kafka.getSpec().getKafka().setMetricsConfig(config);
311+
}
312+
);
313+
314+
RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName()), kafkaPods);
315+
316+
LOGGER.info("Check if Kafka Pods are not missing the 'kafka_server_' metrics");
317+
kafkaCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT);
318+
319+
assertThat(MetricsUtils.createPatternAndCollect(kafkaCollector, "kafka_server_replicamanager_leadercount").isEmpty(), is(false));
320+
}
321+
274322
@BeforeAll
275323
void setupEnvironment() {
276324
testStorage = new TestStorage(KubeResourceManager.get().getTestContext());

0 commit comments

Comments
 (0)