Skip to content

Commit 2b23258

Browse files
authored
Merge branch 'main' into feature/reset-connector-offsets
2 parents 035f2b3 + 01aa8ab commit 2b23258

File tree

6 files changed

+51
-11
lines changed

6 files changed

+51
-11
lines changed

.github/dependabot.yml

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,23 @@ updates:
1414
- "type/dependencies"
1515
- "scope/backend"
1616

17+
- package-ecosystem: docker
18+
directory: "/api"
19+
schedule:
20+
interval: weekly
21+
time: "10:00"
22+
timezone: Europe/London
23+
reviewers:
24+
- "kafbat/backend"
25+
open-pull-requests-limit: 10
26+
ignore:
27+
- dependency-name: "azul/zulu-openjdk-alpine"
28+
# Ignore major version bumps so Dependabot only proposes minor/patch Java upgrades
29+
update-types: ["version-update:semver-major"]
30+
labels:
31+
- "type/dependencies"
32+
- "scope/backend"
33+
1734
- package-ecosystem: npm
1835
directory: "/frontend"
1936
schedule:

api/Dockerfile

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
1-
FROM azul/zulu-openjdk-alpine:17.0.11-jre-headless
1+
# The tag is ignored when a sha is included, but the reasons to add it are:
2+
# 1. Self Documentation: It is difficult to find out what the expected tag is given a sha alone
3+
# 2. Helps dependabot during discovery of upgrades
4+
FROM azul/zulu-openjdk-alpine:17-jre-headless-latest@sha256:af4df00adaec356d092651af50d9e80fd179f96722d267e79acb564aede10fda
25

36
RUN apk add --no-cache \
47
# snappy codec

api/src/main/java/io/kafbat/ui/controller/MessagesController.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,10 +118,11 @@ public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(Strin
118118
if (cursor != null) {
119119
messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor);
120120
} else {
121+
var pollingMode = mode == null ? PollingModeDTO.LATEST : mode;
121122
messagesFlux = messagesService.loadMessages(
122123
getCluster(clusterName),
123124
topicName,
124-
ConsumerPosition.create(checkNotNull(mode), checkNotNull(topicName), partitions, timestamp, offset),
125+
ConsumerPosition.create(pollingMode, checkNotNull(topicName), partitions, timestamp, offset),
125126
stringFilter,
126127
smartFilterId,
127128
limit,

api/src/main/java/io/kafbat/ui/serdes/ProducerRecordCreator.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ public ProducerRecord<byte[], byte[]> create(String topic,
3131

3232
private Iterable<Header> createHeaders(Map<String, String> clientHeaders) {
3333
RecordHeaders headers = new RecordHeaders();
34-
clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v.getBytes())));
34+
clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v == null ? null : v.getBytes())));
3535
return headers;
3636
}
3737

api/src/test/java/io/kafbat/ui/service/SendAndReadTests.java

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
import io.kafbat.ui.serdes.builtin.StringSerde;
2020
import io.kafbat.ui.serdes.builtin.sr.SchemaRegistrySerde;
2121
import java.time.Duration;
22+
import java.util.Collections;
2223
import java.util.List;
2324
import java.util.Map;
2425
import java.util.Objects;
@@ -425,6 +426,25 @@ void topicMessageMetadataJson() {
425426
});
426427
}
427428

429+
@Test
430+
void headerValueNullPresentTest() {
431+
new SendAndReadSpec()
432+
.withKeySchema(JSON_SCHEMA)
433+
.withValueSchema(JSON_SCHEMA)
434+
.withMsgToSend(
435+
new CreateTopicMessageDTO()
436+
.key(JSON_SCHEMA_RECORD)
437+
.keySerde(SchemaRegistrySerde.name())
438+
.content(JSON_SCHEMA_RECORD)
439+
.valueSerde(SchemaRegistrySerde.name())
440+
.headers(Collections.singletonMap("header123", null))
441+
)
442+
.doAssert(polled -> {
443+
assertThat(polled.getHeaders().get("header123")).isNull();
444+
});
445+
}
446+
447+
428448
@Test
429449
void noKeyAndNoContentPresentTest() {
430450
new SendAndReadSpec()

documentation/compose/e2e-tests.yaml

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,8 @@ services:
2929
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
3030

3131
kafka0:
32-
image: confluentinc/cp-kafka:7.2.1
32+
image: confluentinc/cp-kafka:7.6.0
33+
user: "0:0"
3334
hostname: kafka0
3435
container_name: kafka0
3536
healthcheck:
@@ -58,12 +59,10 @@ services:
5859
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
5960
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
6061
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
61-
volumes:
62-
- ./scripts/update_run.sh:/tmp/update_run.sh
63-
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
62+
CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk'
6463

6564
schemaregistry0:
66-
image: confluentinc/cp-schema-registry:7.2.1
65+
image: confluentinc/cp-schema-registry:7.6.0
6766
ports:
6867
- 8085:8085
6968
depends_on:
@@ -88,7 +87,7 @@ services:
8887
build:
8988
context: ./kafka-connect
9089
args:
91-
image: confluentinc/cp-kafka-connect:6.0.1
90+
image: confluentinc/cp-kafka-connect:7.6.0
9291
ports:
9392
- 8083:8083
9493
depends_on:
@@ -122,7 +121,7 @@ services:
122121
# AWS_SECRET_ACCESS_KEY: ""
123122

124123
kafka-init-topics:
125-
image: confluentinc/cp-kafka:7.2.1
124+
image: confluentinc/cp-kafka:7.6.0
126125
volumes:
127126
- ./data/message.json:/data/message.json
128127
depends_on:
@@ -162,7 +161,7 @@ services:
162161
command: bash -c '/connectors/start.sh'
163162

164163
ksqldb:
165-
image: confluentinc/ksqldb-server:0.18.0
164+
image: confluentinc/cp-ksqldb-server:7.6.0
166165
healthcheck:
167166
test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info" ]
168167
interval: 30s

0 commit comments

Comments
 (0)