Skip to content

Commit 8e1ab2b

Browse files
authored
Test support for reactor-kafka (#7886)
* Test support for reactor-kafka * leftover * Run test forked * dispose kafka receivers at test end * Tests on 3.8 are just flaky
1 parent a63909c commit 8e1ab2b

File tree

4 files changed

+475
-21
lines changed

4 files changed

+475
-21
lines changed

dd-java-agent/instrumentation/kafka-clients-0.11/build.gradle

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,13 @@ dependencies {
1919
testImplementation group: 'org.apache.kafka', name: 'kafka-clients', version: '0.11.0.0'
2020
testImplementation group: 'org.springframework.kafka', name: 'spring-kafka', version: '1.3.3.RELEASE'
2121
testImplementation group: 'org.springframework.kafka', name: 'spring-kafka-test', version: '1.3.3.RELEASE'
22+
testImplementation group: 'io.projectreactor.kafka', name: 'reactor-kafka', version: '1.0.0.RELEASE'
2223
testImplementation group: 'javax.xml.bind', name: 'jaxb-api', version: '2.2.3'
2324
testImplementation group: 'org.assertj', name: 'assertj-core', version: '2.9.+'
2425
testImplementation group: 'org.mockito', name: 'mockito-core', version: '2.19.0'
2526
testRuntimeOnly project(':dd-java-agent:instrumentation:spring-scheduling-3.1')
27+
testRuntimeOnly project(':dd-java-agent:instrumentation:reactor-core-3.1')
28+
testRuntimeOnly project(':dd-java-agent:instrumentation:reactive-streams')
2629
testImplementation(testFixtures(project(':dd-java-agent:agent-iast')))
2730

2831

@@ -38,6 +41,8 @@ dependencies {
3841
// This seems to help with jar compatibility hell.
3942
latestDepTestImplementation group: 'org.apache.kafka', name: 'kafka_2.13', version: '2.+'
4043
latestDepTestImplementation group: 'org.apache.kafka', name: 'kafka-clients', version: '2.+'
44+
// latest depends on kafka client 2.x -> to be fixed when this instrumentation also tests 3.x
45+
latestDepTestImplementation group: 'io.projectreactor.kafka', name: 'reactor-kafka', version: '1.3.21'
4146
latestDepTestImplementation group: 'org.springframework.kafka', name: 'spring-kafka', version: '2.+'
4247
latestDepTestImplementation group: 'org.springframework.kafka', name: 'spring-kafka-test', version: '2.+'
4348
latestDepTestImplementation group: 'org.assertj', name: 'assertj-core', version: '3.19.+'
Lines changed: 230 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,230 @@
1+
import static datadog.trace.agent.test.utils.TraceUtils.basicSpan
2+
import static datadog.trace.agent.test.utils.TraceUtils.runUnderTrace
3+
4+
import datadog.trace.agent.test.AgentTestRunner
5+
import datadog.trace.agent.test.asserts.TraceAssert
6+
import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags
7+
import datadog.trace.bootstrap.instrumentation.api.Tags
8+
import datadog.trace.common.writer.ListWriter
9+
import datadog.trace.core.DDSpan
10+
import org.apache.kafka.clients.consumer.ConsumerConfig
11+
import org.apache.kafka.clients.consumer.ConsumerRecord
12+
import org.apache.kafka.clients.producer.ProducerConfig
13+
import org.apache.kafka.clients.producer.ProducerRecord
14+
import org.junit.Rule
15+
import org.springframework.kafka.test.EmbeddedKafkaBroker
16+
import org.springframework.kafka.test.rule.EmbeddedKafkaRule
17+
import org.springframework.kafka.test.utils.KafkaTestUtils
18+
import reactor.core.publisher.Flux
19+
import reactor.core.publisher.Mono
20+
import reactor.core.scheduler.Schedulers
21+
import reactor.kafka.receiver.KafkaReceiver
22+
import reactor.kafka.receiver.ReceiverOptions
23+
import reactor.kafka.sender.KafkaSender
24+
import reactor.kafka.sender.SenderOptions
25+
import reactor.kafka.sender.SenderRecord
26+
27+
import java.util.concurrent.CountDownLatch
28+
import java.util.concurrent.LinkedBlockingQueue
29+
import java.util.concurrent.TimeUnit
30+
31+
/**
 * Forked integration test verifying that the tracer propagates context through
 * reactor-kafka: a {@code kafka.produce} span created by {@link KafkaSender#send}
 * must become the parent of the matching {@code kafka.consume} span emitted on the
 * {@link KafkaReceiver#receive} side, even when the pipeline hops threads via
 * {@code publishOn}/{@code subscribeOn}.
 *
 * <p>Uses a real embedded Kafka broker (1 broker, 4 partitions) on the topic shared
 * with {@code KafkaClientTest}.
 */
class KafkaReactorForkedTest extends AgentTestRunner {
  @Rule
  EmbeddedKafkaRule kafkaRule = new EmbeddedKafkaRule(1, true, 4, KafkaClientTest.SHARED_TOPIC)
  EmbeddedKafkaBroker embeddedKafka = kafkaRule.embeddedKafka

  @Override
  boolean useStrictTraceWrites() {
    // Spans finish on background reactor/kafka threads in nondeterministic order,
    // so strict write ordering cannot be enforced for this test.
    false
  }

  def setup() {
    // Drop traces that consist solely of a "kafka.poll" span: the receiver polls
    // continuously in the background and those spans would otherwise make the
    // trace counts in the assertions below nondeterministic.
    TEST_WRITER.setFilter(new ListWriter.Filter() {
        @Override
        boolean accept(List<DDSpan> trace) {
          return !(trace.size() == 1 &&
            trace.get(0).getResourceName().toString().equals("kafka.poll"))
        }
      })
  }

  def "test reactive produce and consume"() {
    setup:
    def senderProps = KafkaTestUtils.producerProps(embeddedKafka)
    if (isDataStreamsEnabled()) {
      // Shorten metadata refresh so data-streams cluster metadata is picked up quickly.
      senderProps.put(ProducerConfig.METADATA_MAX_AGE_CONFIG, 1000)
    }

    def kafkaSender = KafkaSender.create(SenderOptions.create(senderProps))
    // set up the Kafka consumer properties
    def consumerProperties = KafkaTestUtils.consumerProps("sender", "false", embeddedKafka)
    // One latch count per partition; counted down as partitions get assigned.
    def subscriptionReady = new CountDownLatch(embeddedKafka.getPartitionsPerTopic())

    final KafkaReceiver<String, String> kafkaReceiver = KafkaReceiver.create(ReceiverOptions.<String, String> create(consumerProperties)
      .subscription([KafkaClientTest.SHARED_TOPIC])
      .addAssignListener {
        // The listener receives the collection of newly assigned partitions.
        it.each { subscriptionReady.countDown() }
      })

    // create a thread safe queue to store the received message
    def records = new LinkedBlockingQueue<ConsumerRecord<String, String>>()
    kafkaReceiver.receive()
    // publish on another thread to be sure we're propagating that receive span correctly
      .publishOn(Schedulers.parallel())
      .flatMap { receiverRecord ->
        // NOTE(review): the inner braces form a nested Groovy closure literal, not a
        // plain block — confirm the add/commit statements actually execute as intended.
        {
          records.add(receiverRecord)
          receiverRecord.receiverOffset().commit()
        }
      }.subscribe()

    // wait until the container has the required number of assigned partitions
    subscriptionReady.await()

    when:
    String greeting = "Hello Reactor Kafka Sender!"
    // Send a single record under a manual "parent" span; the producer callback and
    // the kafka.produce span must both finish as children of it.
    runUnderTrace("parent") {
      kafkaSender.send(Mono.just(SenderRecord.create(new ProducerRecord<>(KafkaClientTest.SHARED_TOPIC, greeting), null)))
        .doOnError { ex -> runUnderTrace("producer exception: " + ex) {} }
        .doOnNext { runUnderTrace("producer callback") {} }
        .blockFirst()
      blockUntilChildSpansFinished(2)
    }
    then:
    // check that the message was received
    def received = records.poll(5, TimeUnit.SECONDS)
    received.value() == greeting
    received.key() == null

    assertTraces(2, SORT_TRACES_BY_START) {
      // Producer-side trace: parent -> {producer callback, kafka.produce}.
      trace(3) {
        basicSpan(it, "parent")
        basicSpan(it, "producer callback", span(0))
        producerSpan(it, senderProps, span(0))
      }
      // Consumer-side trace: kafka.consume parented to the produce span above
      // (trace(0)[2] is the kafka.produce span), i.e. context propagated via headers.
      trace(1) {
        consumerSpan(it, consumerProperties, trace(0)[2])
      }
    }
  }

  def "test reactive 100 msg produce and consume have only one parent"() {
    setup:
    def senderProps = KafkaTestUtils.producerProps(embeddedKafka)
    if (isDataStreamsEnabled()) {
      senderProps.put(ProducerConfig.METADATA_MAX_AGE_CONFIG, 1000)
    }

    def kafkaSender = KafkaSender.create(SenderOptions.create(senderProps))
    // set up the Kafka consumer properties
    def consumerProperties = KafkaTestUtils.consumerProps("sender", "false", embeddedKafka)
    def subscriptionReady = new CountDownLatch(embeddedKafka.getPartitionsPerTopic())

    final KafkaReceiver<String, String> kafkaReceiver = KafkaReceiver.create(ReceiverOptions.<String, String> create(consumerProperties)
      .subscription([KafkaClientTest.SHARED_TOPIC])
      .addAssignListener {
        it.each { subscriptionReady.countDown() }
      })

    // create a thread safe queue to store the received message
    kafkaReceiver.receive()
    // publish on another thread to be sure we're propagating that receive span correctly
      .publishOn(Schedulers.parallel())
      .flatMap { receiverRecord ->
        // NOTE(review): nested closure literal as in the first test — verify commit runs.
        {
          receiverRecord.receiverOffset().commit()
        }
      }
      .subscribeOn(Schedulers.parallel())
      .subscribe()

    // wait until the container has the required number of assigned partitions
    subscriptionReady.await()

    when:
    String greeting = "Hello Reactor Kafka Sender!"
    // Fire 100 sends with NO active parent span: each produce span must be a root
    // and each consume span must attach to exactly one produce span.
    Flux.range(0, 100)
      .flatMap { kafkaSender.send(Mono.just(SenderRecord.create(new ProducerRecord<>(KafkaClientTest.SHARED_TOPIC, greeting), null))) }
      .publishOn(Schedulers.parallel())
      .subscribe()
    then:
    // check that the all the consume (100) and the send (100) are reported
    TEST_WRITER.waitForTraces(200)
    // Group the reported spans by trace id: each group should pair one produce
    // with one consume span.
    Map<String, List<DDSpan>> traces = TEST_WRITER.inject([:]) { map, entry ->
      def key = entry.get(0).getTraceId().toString()
      map[key] = (map[key] ?: []) + entry
      return map
    }
    traces.values().each {
      assert it.size() == 2
      // Span order within a trace group is not guaranteed; detect which index
      // holds the produce span.
      int produceIndex = 0
      int consumeIndex = 1
      if ("kafka.produce".contentEquals(it.get(1).getOperationName().toString())) {
        produceIndex = 1
        consumeIndex = 0
      }
      //assert that the consumer has the producer as parent and that the producer is root
      assert it.get(consumeIndex).getParentId() == it.get(produceIndex).getSpanId()
      assert it.get(produceIndex).getParentId() == 0
    }
  }

  /**
   * Asserts the shape of a {@code kafka.produce} span.
   *
   * @param trace      the trace assertion context
   * @param config     producer properties, used to check the bootstrap-servers tag
   * @param parentSpan expected parent, or null when the span should be a root
   */
  def producerSpan(
    TraceAssert trace,
    Map<String, ?> config,
    DDSpan parentSpan = null) {
    trace.span {
      serviceName "kafka"
      operationName "kafka.produce"
      resourceName "Produce Topic $KafkaClientTest.SHARED_TOPIC"
      spanType "queue"
      errored false
      measured true
      if (parentSpan) {
        childOf parentSpan
      } else {
        parent()
      }
      tags {
        "$Tags.COMPONENT" "java-kafka"
        "$Tags.SPAN_KIND" Tags.SPAN_KIND_PRODUCER
        "$InstrumentationTags.KAFKA_BOOTSTRAP_SERVERS" config.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)
        peerServiceFrom(InstrumentationTags.KAFKA_BOOTSTRAP_SERVERS)
        defaultTags()
      }
    }
  }

  /**
   * Asserts the shape of a {@code kafka.consume} span.
   *
   * @param trace      the trace assertion context
   * @param config     consumer properties, used to check the bootstrap-servers tag
   * @param parentSpan expected parent (normally the matching produce span), or null for a root
   */
  def consumerSpan(
    TraceAssert trace,
    Map<String, Object> config,
    DDSpan parentSpan = null) {
    trace.span {
      serviceName "kafka"
      operationName "kafka.consume"
      resourceName "Consume Topic $KafkaClientTest.SHARED_TOPIC"
      spanType "queue"
      errored false
      measured true
      if (parentSpan) {
        childOf parentSpan
      } else {
        parent()
      }
      tags {
        "$Tags.COMPONENT" "java-kafka"
        "$Tags.SPAN_KIND" Tags.SPAN_KIND_CONSUMER
        "$InstrumentationTags.PARTITION" { it >= 0 }
        "$InstrumentationTags.OFFSET" { Integer }
        "$InstrumentationTags.CONSUMER_GROUP" "sender"
        "$InstrumentationTags.KAFKA_BOOTSTRAP_SERVERS" config.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)
        "$InstrumentationTags.RECORD_QUEUE_TIME_MS" { it >= 0 }
        // defaultTags(true) expects the distributed-trace parent tags to be present.
        defaultTags(true)
      }
    }
  }
}

0 commit comments

Comments
 (0)