Skip to content

Commit 951b8c3

Browse files
committed
Merge remote-tracking branch 'origin/main' into feature/reset-connector-offsets
2 parents cffa239 + 0ad8695 commit 951b8c3

File tree

31 files changed

+444
-234
lines changed

31 files changed

+444
-234
lines changed

.dev/dev_arm64.yaml

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,8 @@ services:
3232
KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
3333

3434
kafka0:
35-
image: confluentinc/cp-kafka:7.2.1.arm64
35+
image: confluentinc/cp-kafka:7.6.0.arm64
36+
user: "0:0"
3637
hostname: kafka0
3738
container_name: kafka0
3839
ports:
@@ -56,12 +57,10 @@ services:
5657
KAFKA_JMX_PORT: 9997
5758
# KAFKA_JMX_HOSTNAME: localhost # uncomment this line and comment the next one if running with kafka-ui as a jar
5859
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
59-
volumes:
60-
- ../documentation/compose/scripts/update_run.sh:/tmp/update_run.sh
61-
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
60+
CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk'
6261

6362
schema-registry0:
64-
image: confluentinc/cp-schema-registry:7.2.1.arm64
63+
image: confluentinc/cp-schema-registry:7.6.0.arm64
6564
ports:
6665
- 8085:8085
6766
depends_on:
@@ -77,7 +76,7 @@ services:
7776
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
7877

7978
kafka-connect0:
80-
image: confluentinc/cp-kafka-connect:7.2.1.arm64
79+
image: confluentinc/cp-kafka-connect:7.6.0.arm64
8180
ports:
8281
- 8083:8083
8382
depends_on:
@@ -102,7 +101,7 @@ services:
102101
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/local/share/kafka/plugins,/usr/share/filestream-connectors"
103102

104103
ksqldb0:
105-
image: confluentinc/ksqldb-server:0.18.0
104+
image: confluentinc/cp-ksqldb-server:7.6.0.arm64
106105
depends_on:
107106
- kafka0
108107
- kafka-connect0
@@ -120,7 +119,7 @@ services:
120119
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
121120

122121
kafka-init-topics:
123-
image: confluentinc/cp-kafka:7.2.1.arm64
122+
image: confluentinc/cp-kafka:7.6.0.arm64
124123
volumes:
125124
- ../documentation/compose/data/message.json:/data/message.json
126125
depends_on:

.github/workflows/build-public-image.yml

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@ on:
66
types: ['labeled']
77

88
permissions:
9+
id-token: write
910
contents: read
11+
pull-requests: write
1012

1113
jobs:
1214
build:
@@ -47,12 +49,11 @@ jobs:
4749
key: ${{ runner.os }}-buildx-${{ github.sha }}
4850
restore-keys: |
4951
${{ runner.os }}-buildx-
50-
- name: Configure AWS credentials for Kafka-UI account
52+
- name: Configure AWS Credentials
5153
uses: aws-actions/configure-aws-credentials@v4
5254
with:
53-
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
54-
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
5555
aws-region: us-east-1
56+
role-to-assume: ${{ secrets.AWS_ROLE }}
5657
- name: Login to Amazon ECR
5758
id: login-ecr
5859
uses: aws-actions/amazon-ecr-login@v2
@@ -65,7 +66,7 @@ jobs:
6566
builder: ${{ steps.buildx.outputs.name }}
6667
context: api
6768
push: true
68-
tags: public.ecr.aws/kafbat/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
69+
tags: ${{ vars.ECR_REGISTRY }}/${{ github.repository }}:${{ steps.extract_branch.outputs.tag }}
6970
build-args: |
7071
JAR_FILE=api-${{ steps.build.outputs.version }}.jar
7172
cache-from: type=local,src=/tmp/.buildx-cache
@@ -75,6 +76,6 @@ jobs:
7576
with:
7677
issue-number: ${{ github.event.pull_request.number }}
7778
body: |
78-
Image published at public.ecr.aws/kafbat/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
79+
Image published at ${{ vars.ECR_REGISTRY }}/${{ github.repository }}:${{ steps.extract_branch.outputs.tag }}
7980
outputs:
8081
tag: ${{ steps.extract_branch.outputs.tag }}

.github/workflows/docker_publish.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ jobs:
8585
echo "REGISTRY=${{ matrix.registry }}" >> $GITHUB_ENV
8686
echo "REPOSITORY=${{ github.repository }}" >> $GITHUB_ENV
8787
elif [ ${{ matrix.registry }} == 'ecr' ]; then
88-
echo "REGISTRY=${{ steps.login-ecr-public.outputs.registry }}" >> $GITHUB_ENV
88+
echo "REGISTRY=${{ vars.ECR_REGISTRY }}" >> $GITHUB_ENV
8989
echo "REPOSITORY=${{ github.repository }}" >> $GITHUB_ENV
9090
else
9191
echo "REGISTRY=" >> $GITHUB_ENV

api/pom.xml

Lines changed: 1 addition & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@
9191
<dependency>
9292
<groupId>software.amazon.msk</groupId>
9393
<artifactId>aws-msk-iam-auth</artifactId>
94-
<version>2.1.0</version>
94+
<version>2.2.0</version>
9595
</dependency>
9696

9797
<dependency>
@@ -266,18 +266,6 @@
266266
<artifactId>cel</artifactId>
267267
</dependency>
268268
<!-- CVE fixes -->
269-
<dependency>
270-
<groupId>ch.qos.logback</groupId>
271-
<artifactId>logback-classic</artifactId>
272-
<version>1.4.12</version>
273-
</dependency>
274-
<!-- CVE fixes -->
275-
<dependency>
276-
<groupId>ch.qos.logback</groupId>
277-
<artifactId>logback-core</artifactId>
278-
<version>1.4.12</version>
279-
</dependency>
280-
<!-- CVE fixes -->
281269
<dependency>
282270
<groupId>com.squareup.okhttp3</groupId>
283271
<artifactId>logging-interceptor</artifactId>
@@ -289,7 +277,6 @@
289277
<artifactId>commons-compress</artifactId>
290278
<version>1.26.0</version>
291279
</dependency>
292-
293280
</dependencies>
294281

295282
<build>

api/src/main/java/io/kafbat/ui/config/ReadOnlyModeFilter.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ public class ReadOnlyModeFilter implements WebFilter {
2525
Pattern.compile("/api/clusters/(?<clusterName>[^/]++)");
2626

2727
private static final Set<Pattern> SAFE_ENDPOINTS = Set.of(
28-
Pattern.compile("/api/clusters/[^/]+/topics/[^/]+/(smartfilters)$")
28+
Pattern.compile("/api/clusters/[^/]+/topics/[^/]+/(smartfilters|analysis)$")
2929
);
3030

3131
private final ClustersStorage clustersStorage;

api/src/main/java/io/kafbat/ui/controller/ConsumerGroupsController.java

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,24 @@ public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
5959
.thenReturn(ResponseEntity.ok().build());
6060
}
6161

62+
@Override
63+
public Mono<ResponseEntity<Void>> deleteConsumerGroupOffsets(String clusterName,
64+
String groupId,
65+
String topicName,
66+
ServerWebExchange exchange) {
67+
var context = AccessContext.builder()
68+
.cluster(clusterName)
69+
.consumerGroupActions(groupId, RESET_OFFSETS)
70+
.topicActions(topicName, TopicAction.VIEW)
71+
.operationName("deleteConsumerGroupOffsets")
72+
.build();
73+
74+
return validateAccess(context)
75+
.then(consumerGroupService.deleteConsumerGroupOffset(getCluster(clusterName), groupId, topicName))
76+
.doOnEach(sig -> audit(context, sig))
77+
.thenReturn(ResponseEntity.ok().build());
78+
}
79+
6280
@Override
6381
public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
6482
String consumerGroupId,

api/src/main/java/io/kafbat/ui/model/InternalBrokerConfig.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,12 @@ public class InternalBrokerConfig {
1616
private final boolean isReadOnly;
1717
private final List<ConfigEntry.ConfigSynonym> synonyms;
1818

19-
public static InternalBrokerConfig from(ConfigEntry configEntry) {
19+
public static InternalBrokerConfig from(ConfigEntry configEntry, boolean readOnlyCluster) {
2020
InternalBrokerConfig.InternalBrokerConfigBuilder builder = InternalBrokerConfig.builder()
2121
.name(configEntry.name())
2222
.value(configEntry.value())
2323
.source(configEntry.source())
24-
.isReadOnly(configEntry.isReadOnly())
24+
.isReadOnly(readOnlyCluster || configEntry.isReadOnly())
2525
.isSensitive(configEntry.isSensitive())
2626
.synonyms(configEntry.synonyms());
2727
return builder.build();

api/src/main/java/io/kafbat/ui/service/BrokerService.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ private Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Intege
5959
}
6060
return loadBrokersConfig(cluster, brokerId)
6161
.map(list -> list.stream()
62-
.map(InternalBrokerConfig::from)
62+
.map(configEntry -> InternalBrokerConfig.from(configEntry, cluster.isReadOnly()))
6363
.collect(Collectors.toList()))
6464
.flatMapMany(Flux::fromIterable);
6565
}

api/src/main/java/io/kafbat/ui/service/ConsumerGroupService.java

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -209,12 +209,13 @@ private Mono<List<ConsumerGroupDescription>> describeConsumerGroups(ReactiveAdmi
209209
}
210210

211211

212-
private Mono<List<ConsumerGroupDescription>> loadDescriptionsByInternalConsumerGroups(ReactiveAdminClient ac,
213-
List<ConsumerGroupListing> groups,
214-
Comparator<GroupWithDescr> comparator,
215-
int pageNum,
216-
int perPage,
217-
SortOrderDTO sortOrderDto) {
212+
private Mono<List<ConsumerGroupDescription>> loadDescriptionsByInternalConsumerGroups(
213+
ReactiveAdminClient ac,
214+
List<ConsumerGroupListing> groups,
215+
Comparator<GroupWithDescr> comparator,
216+
int pageNum,
217+
int perPage,
218+
SortOrderDTO sortOrderDto) {
218219
var groupNames = groups.stream().map(ConsumerGroupListing::groupId).toList();
219220

220221
return ac.describeConsumerGroups(groupNames)
@@ -247,6 +248,13 @@ public Mono<Void> deleteConsumerGroupById(KafkaCluster cluster,
247248
.flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)));
248249
}
249250

251+
public Mono<Void> deleteConsumerGroupOffset(KafkaCluster cluster,
252+
String groupId,
253+
String topicName) {
254+
return adminClientService.get(cluster)
255+
.flatMap(adminClient -> adminClient.deleteConsumerGroupOffsets(groupId, topicName));
256+
}
257+
250258
public EnhancedConsumer createConsumer(KafkaCluster cluster) {
251259
return createConsumer(cluster, Map.of());
252260
}

api/src/main/java/io/kafbat/ui/service/ReactiveAdminClient.java

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@
7474
import org.apache.kafka.common.errors.ClusterAuthorizationException;
7575
import org.apache.kafka.common.errors.GroupIdNotFoundException;
7676
import org.apache.kafka.common.errors.GroupNotEmptyException;
77+
import org.apache.kafka.common.errors.GroupSubscribedToTopicException;
7778
import org.apache.kafka.common.errors.InvalidRequestException;
7879
import org.apache.kafka.common.errors.SecurityDisabledException;
7980
import org.apache.kafka.common.errors.TopicAuthorizationException;
@@ -436,6 +437,27 @@ public Mono<Void> deleteConsumerGroups(Collection<String> groupIds) {
436437
th -> Mono.error(new IllegalEntityStateException("The group is not empty")));
437438
}
438439

440+
public Mono<Void> deleteConsumerGroupOffsets(String groupId, String topicName) {
441+
return listConsumerGroupOffsets(List.of(groupId), null)
442+
.flatMap(table -> {
443+
// filter TopicPartitions by topicName
444+
Set<TopicPartition> partitions = table.row(groupId).keySet().stream()
445+
.filter(tp -> tp.topic().equals(topicName))
446+
.collect(Collectors.toSet());
447+
// check if partitions have no committed offsets
448+
return partitions.isEmpty()
449+
? Mono.error(new NotFoundException("The topic or partition is unknown"))
450+
// call deleteConsumerGroupOffsets
451+
: toMono(client.deleteConsumerGroupOffsets(groupId, partitions).all());
452+
})
453+
.onErrorResume(GroupIdNotFoundException.class,
454+
th -> Mono.error(new NotFoundException("The group id does not exist")))
455+
.onErrorResume(UnknownTopicOrPartitionException.class,
456+
th -> Mono.error(new NotFoundException("The topic or partition is unknown")))
457+
.onErrorResume(GroupSubscribedToTopicException.class,
458+
th -> Mono.error(new IllegalEntityStateException("The group is not empty")));
459+
}
460+
439461
public Mono<Void> createTopic(String name,
440462
int numPartitions,
441463
@Nullable Integer replicationFactor,

0 commit comments

Comments (0)