Skip to content

Commit 71c6a9e

Browse files
committed
Add interceptor for broker's state & update to librdkafka v2.0.2
1 parent 6b8ec71 commit 71c6a9e

File tree

9 files changed

+144
-103
lines changed

9 files changed

+144
-103
lines changed

.github/workflows/kafka_api_bazel_build.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ on:
99
env:
1010
KAFKA_SRC_LINK: https://archive.apache.org/dist/kafka/3.3.1/kafka_2.13-3.3.1.tgz
1111
CPU_CORE_NUM: 2
12-
LIBRDKAFKA_TAG: v1.9.2
12+
LIBRDKAFKA_TAG: v2.0.2
1313

1414
jobs:
1515
kafka-api-bazel-build:

.github/workflows/kafka_api_ci_tests.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ on:
99
env:
1010
KAFKA_SRC_LINK: https://archive.apache.org/dist/kafka/3.3.1/kafka_2.13-3.3.1.tgz
1111
CPU_CORE_NUM: 2
12-
LIBRDKAFKA_TAG: v1.9.2
12+
LIBRDKAFKA_TAG: v2.0.2
1313
BUILD_SUB_DIR: builds/sub-build
1414

1515
jobs:

.github/workflows/kafka_api_demo_conan_build.yml

Lines changed: 6 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -26,19 +26,9 @@ jobs:
2626
steps:
2727
- uses: actions/checkout@v2
2828

29-
- name: Prepare (non-windows)
30-
if: ${{!contains(matrix.os, 'windows')}}
31-
run: |
32-
if [[ ${OS_VERSION} == 'macos'* ]]; then
33-
brew install conan
34-
else
35-
pip3 install conan
36-
fi
37-
38-
- name: Prepare (windows)
39-
if: ${{contains(matrix.os, 'windows')}}
29+
- name: Prepare
4030
run: |
41-
pip3 install conan
31+
pip3 install conan==1.59.0
4232
4333
- name: Build (non-windows)
4434
if: ${{!contains(matrix.os, 'windows')}}
@@ -52,11 +42,8 @@ jobs:
5242
cmake .. -G "Unix Makefiles"
5343
cmake --build .
5444
55-
bin/kafka_sync_producer
56-
bin/kafka_async_producer_copy_payload
57-
bin/kafka_async_producer_not_copy_payload
58-
bin/kafka_auto_commit_consumer
59-
bin/kafka_manual_commit_consumer
45+
bin/kafka_producer
46+
bin/kafka_consumer
6047
6148
- name: Build (windows)
6249
if: contains(matrix.os, 'windows')
@@ -70,9 +57,6 @@ jobs:
7057
cmake ..
7158
cmake --build .
7259
73-
bin/kafka_sync_producer.exe
74-
bin/kafka_async_producer_copy_payload.exe
75-
bin/kafka_async_producer_not_copy_payload.exe
76-
bin/kafka_auto_commit_consumer.exe
77-
bin/kafka_manual_commit_consumer.exe
60+
bin/kafka_producer.exe
61+
bin/kafka_consumer.exe
7862

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ About the *Modern C++ Kafka API*
66

77
The [modern-cpp-kafka API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) is a layer of ***C++*** wrapper based on [librdkafka](https://github.com/confluentinc/librdkafka) (the ***C*** part only), with high quality, but more friendly to users.
88

9-
- By now, [modern-cpp-kafka](https://github.com/morganstanley/modern-cpp-kafka) is compatible with [librdkafka v1.9.2](https://github.com/confluentinc/librdkafka/releases/tag/v1.9.2).
9+
- By now, [modern-cpp-kafka](https://github.com/morganstanley/modern-cpp-kafka) is compatible with [librdkafka v2.0.2](https://github.com/confluentinc/librdkafka/releases/tag/v2.0.2).
1010

1111

1212
```

demo_projects_for_build/conan_build/CMakeLists.txt

Lines changed: 6 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -7,22 +7,11 @@ set(CMAKE_CXX_STANDARD_REQUIRED True)
77
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
88
conan_basic_setup()
99

10-
# Target: kafka_sync_producer
11-
add_executable("kafka_sync_producer" "../../examples/kafka_sync_producer.cc")
12-
target_link_libraries("kafka_sync_producer" ${CONAN_LIBS})
10+
# Target: kafka_producer
11+
add_executable("kafka_producer" "../../examples/kafka_async_producer_not_copy_payload.cc")
12+
target_link_libraries("kafka_producer" ${CONAN_LIBS})
1313

14-
# Target: kafka_async_producer_copy_payload
15-
add_executable("kafka_async_producer_copy_payload" "../../examples/kafka_async_producer_copy_payload.cc")
16-
target_link_libraries("kafka_async_producer_copy_payload" ${CONAN_LIBS})
14+
# Target: kafka_consumer
15+
add_executable("kafka_consumer" "../../examples/kafka_auto_commit_consumer.cc")
16+
target_link_libraries("kafka_consumer" ${CONAN_LIBS})
1717

18-
# Target: kafka_async_producer_not_copy_payload
19-
add_executable("kafka_async_producer_not_copy_payload" "../../examples/kafka_async_producer_not_copy_payload.cc")
20-
target_link_libraries("kafka_async_producer_not_copy_payload" ${CONAN_LIBS})
21-
22-
# Target: kafka_auto_commit_consumer
23-
add_executable("kafka_auto_commit_consumer" "../../examples/kafka_auto_commit_consumer.cc")
24-
target_link_libraries("kafka_auto_commit_consumer" ${CONAN_LIBS})
25-
26-
# Target: kafka_manual_commit_consumer
27-
add_executable("kafka_manual_commit_consumer" "../../examples/kafka_manual_commit_consumer.cc")
28-
target_link_libraries("kafka_manual_commit_consumer" ${CONAN_LIBS})
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[requires]
2-
modern-cpp-kafka/2022.06.15
2+
modern-cpp-kafka/2023.01.05
33

44
[generators]
55
cmake

include/kafka/Interceptors.h

Lines changed: 26 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,42 +16,59 @@ class Interceptors
1616
/**
1717
* Callback type for thread-start interceptor.
1818
*/
19-
using ThreadStartCallback = std::function<void(const std::string&, const std::string&)>;
19+
using ThreadStartCb = std::function<void(const std::string&, const std::string&)>;
2020

2121
/**
2222
* Callback type for thread-exit interceptor.
2323
*/
24-
using ThreadExitCallback = std::function<void(const std::string&, const std::string&)>;
24+
using ThreadExitCb = std::function<void(const std::string&, const std::string&)>;
25+
26+
/**
27+
* Callback type for broker-state-change interceptor.
28+
*/
29+
using BrokerStateChangeCb = std::function<void(int, const std::string&, const std::string&, int, const std::string&)>;
2530

2631
/**
2732
* Set interceptor for thread start.
2833
*/
29-
Interceptors& onThreadStart(ThreadStartCallback cb) { _valid = true; _threadStartCb = std::move(cb); return *this; }
34+
Interceptors& onThreadStart(ThreadStartCb cb) { _valid = true; _threadStartCb = std::move(cb); return *this; }
3035

3136
/**
3237
* Set interceptor for thread exit.
3338
*/
34-
Interceptors& onThreadExit(ThreadExitCallback cb) { _valid = true; _threadExitCb = std::move(cb); return *this; }
39+
Interceptors& onThreadExit(ThreadExitCb cb) { _valid = true; _threadExitCb = std::move(cb); return *this; }
40+
41+
/**
42+
* Set interceptor for broker state change.
43+
*/
44+
Interceptors& onBrokerStateChange(BrokerStateChangeCb cb) { _valid = true; _brokerStateChangeCb = std::move(cb); return *this; }
3545

3646
/**
3747
* Get interceptor for thread start.
3848
*/
39-
ThreadStartCallback onThreadStart() const { return _threadStartCb; }
49+
ThreadStartCb onThreadStart() const { return _threadStartCb; }
4050

4151
/**
4252
* Get interceptor for thread exit.
4353
*/
44-
ThreadExitCallback onThreadExit() const { return _threadExitCb; }
54+
ThreadExitCb onThreadExit() const { return _threadExitCb; }
55+
56+
/**
57+
* Get interceptor for broker state change.
58+
*/
59+
BrokerStateChangeCb onBrokerStateChange() const { return _brokerStateChangeCb; }
4560

4661
/**
4762
* Check if there's no interceptor.
4863
*/
4964
bool empty() const { return !_valid; }
5065

5166
private:
52-
ThreadStartCallback _threadStartCb;
53-
ThreadExitCallback _threadExitCb;
54-
bool _valid = false;
67+
ThreadStartCb _threadStartCb;
68+
ThreadExitCb _threadExitCb;
69+
BrokerStateChangeCb _brokerStateChangeCb;
70+
71+
bool _valid = false;
5572
};
5673

5774
} } // end of KAFKA_API::clients

include/kafka/KafkaClient.h

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ class KafkaClient
189189
static rd_kafka_resp_err_t configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* conf, void* opaque, char* errStr, std::size_t maxErrStrSize);
190190
static rd_kafka_resp_err_t interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* opaque);
191191
static rd_kafka_resp_err_t interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* opaque);
192+
static rd_kafka_resp_err_t interceptorOnBrokerStateChange(rd_kafka_t* rk, int id, const char* secproto, const char* host, int port, const char* state, void* opaque);
192193

193194
// Log callback (for class instance)
194195
void onLog(int level, const char* fac, const char* buf) const;
@@ -205,6 +206,7 @@ class KafkaClient
205206
// Interceptor callback (for class instance)
206207
void interceptThreadStart(const std::string& threadName, const std::string& threadType);
207208
void interceptThreadExit(const std::string& threadName, const std::string& threadType);
209+
void interceptBrokerStateChange(int id, const std::string& secproto, const std::string& host, int port, const std::string& state);
208210

209211
protected:
210212
struct Pollable
@@ -608,6 +610,12 @@ KafkaClient::interceptThreadExit(const std::string& threadName, const std::strin
608610
if (const auto& cb = _interceptors.onThreadExit()) cb(threadName, threadType);
609611
}
610612

613+
inline void
614+
KafkaClient::interceptBrokerStateChange(int id, const std::string& secproto, const std::string& host, int port, const std::string& state)
615+
{
616+
if (const auto& cb = _interceptors.onBrokerStateChange()) cb(id, secproto, host, port, state);
617+
}
618+
611619
inline rd_kafka_resp_err_t
612620
KafkaClient::configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* /*conf*/, void* opaque, char* /*errStr*/, std::size_t /*maxErrStrSize*/)
613621
{
@@ -621,25 +629,38 @@ KafkaClient::configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* /*con
621629
return result;
622630
}
623631

632+
if (auto result = rd_kafka_interceptor_add_on_broker_state_change(rk, "on_broker_state_change", KafkaClient::interceptorOnBrokerStateChange, opaque))
633+
{
634+
return result;
635+
}
636+
624637
return RD_KAFKA_RESP_ERR_NO_ERROR;
625638
}
626639

627640
inline rd_kafka_resp_err_t
628-
KafkaClient::interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /*opaque*/)
641+
KafkaClient::interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /* opaque */)
629642
{
630643
kafkaClient(rk).interceptThreadStart(threadName, toString(threadType));
631644

632645
return RD_KAFKA_RESP_ERR_NO_ERROR;
633646
}
634647

635648
inline rd_kafka_resp_err_t
636-
KafkaClient::interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /*opaque*/)
649+
KafkaClient::interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /* opaque */)
637650
{
638651
kafkaClient(rk).interceptThreadExit(threadName, toString(threadType));
639652

640653
return RD_KAFKA_RESP_ERR_NO_ERROR;
641654
}
642655

656+
inline rd_kafka_resp_err_t
657+
KafkaClient::interceptorOnBrokerStateChange(rd_kafka_t* rk, int id, const char* secproto, const char* host, int port, const char* state, void* /* opaque */)
658+
{
659+
kafkaClient(rk).interceptBrokerStateChange(id, secproto, host, port, state);
660+
661+
return RD_KAFKA_RESP_ERR_NO_ERROR;
662+
}
663+
643664
inline Optional<BrokerMetadata>
644665
KafkaClient::fetchBrokerMetadata(const std::string& topic, std::chrono::milliseconds timeout, bool disableErrorLogging)
645666
{

tests/integration/TestKafkaConsumer.cc

Lines changed: 79 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -23,67 +23,97 @@ TEST(KafkaConsumer, BasicPoll)
2323

2424
KafkaTestUtility::CreateKafkaTopic(topic, 5, 3);
2525

26-
// The auto-commit consumer
27-
kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig());
28-
std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl;
26+
std::map<std::string, std::string> brokersState;
27+
28+
kafka::clients::Interceptors interceptors;
29+
interceptors.onBrokerStateChange([&brokersState](int id, const std::string& proto, const std::string& name, int port, const std::string& state) {
30+
const std::string brokerDescription = (std::to_string(id) + " - " + proto + "://" + name + ":" + std::to_string(port));
31+
std::cout << "Broker[" << brokerDescription << "] ==> " << state << std::endl;
32+
if (!name.empty() && name != "GroupCoordinator")
33+
{
34+
brokersState[name + ":" + std::to_string(port)] = state;
35+
}
36+
});
2937

30-
// Subscribe topics
31-
consumer.subscribe({topic},
32-
[](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) {
33-
if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) {
34-
// assignment finished
35-
std::cout << "[" << kafka::utility::getCurrentTime() << "] assigned partitions: " << kafka::toString(tps) << std::endl;
36-
}
37-
});
38-
EXPECT_FALSE(consumer.subscription().empty());
38+
{
39+
// Config the consumer with interceptors
40+
kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()
41+
.put(kafka::clients::Config::INTERCEPTORS, interceptors));
3942

40-
// No message yet
41-
auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer, std::chrono::seconds(1));
42-
EXPECT_EQ(0, records.size());
43+
std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl;
4344

44-
// Try to get the beginning offsets
45-
const kafka::TopicPartition tp{topic, partition};
46-
std::cout << "[" << kafka::utility::getCurrentTime() << "] Consumer get the beginningOffset[" << consumer.beginningOffsets({tp})[tp] << "]" << std::endl;;
45+
// Subscribe topics
46+
consumer.subscribe({topic},
47+
[](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) {
48+
if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) {
49+
// assignment finished
50+
std::cout << "[" << kafka::utility::getCurrentTime() << "] assigned partitions: " << kafka::toString(tps) << std::endl;
51+
}
52+
});
53+
EXPECT_FALSE(consumer.subscription().empty());
4754

48-
// Prepare some messages to send
49-
const std::vector<std::tuple<kafka::Headers, std::string, std::string>> messages = {
50-
{kafka::Headers{}, "key1", "value1"},
51-
{kafka::Headers{}, "key2", "value2"},
52-
{kafka::Headers{}, "key3", "value3"},
53-
};
55+
// No message yet
56+
auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer, std::chrono::seconds(1));
57+
EXPECT_EQ(0, records.size());
5458

55-
// Send the messages
56-
KafkaTestUtility::ProduceMessages(topic, partition, messages);
59+
// Should be able to get all brokers' state
60+
EXPECT_EQ(KafkaTestUtility::GetNumberOfKafkaBrokers(), brokersState.size());
61+
// All brokers' state should be "UP"
62+
for (const auto& brokerState: brokersState)
63+
{
64+
EXPECT_EQ("UP", brokerState.second);
65+
}
5766

58-
// Poll these messages
59-
records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer);
60-
EXPECT_EQ(messages.size(), records.size());
67+
// Try to get the beginning offsets
68+
const kafka::TopicPartition tp{topic, partition};
69+
std::cout << "[" << kafka::utility::getCurrentTime() << "] Consumer get the beginningOffset[" << consumer.beginningOffsets({tp})[tp] << "]" << std::endl;;
6170

62-
// Copyable ConsumerRecord
63-
{
64-
auto recordsCopy = records;
65-
recordsCopy.clear();
66-
}
71+
// Prepare some messages to send
72+
const std::vector<std::tuple<kafka::Headers, std::string, std::string>> messages = {
73+
{kafka::Headers{}, "key1", "value1"},
74+
{kafka::Headers{}, "key2", "value2"},
75+
{kafka::Headers{}, "key3", "value3"},
76+
};
6777

68-
// Check messages
69-
std::size_t rcvMsgCount = 0;
70-
for (auto& record: records)
71-
{
72-
ASSERT_TRUE(rcvMsgCount < messages.size());
78+
// Send the messages
79+
KafkaTestUtility::ProduceMessages(topic, partition, messages);
7380

74-
EXPECT_EQ(topic, record.topic());
75-
EXPECT_EQ(partition, record.partition());
76-
EXPECT_EQ(0, record.headers().size());
77-
EXPECT_EQ(std::get<1>(messages[rcvMsgCount]).size(), record.key().size());
78-
EXPECT_EQ(0, std::memcmp(std::get<1>(messages[rcvMsgCount]).c_str(), record.key().data(), record.key().size()));
79-
EXPECT_EQ(std::get<2>(messages[rcvMsgCount]).size(), record.value().size());
80-
EXPECT_EQ(0, std::memcmp(std::get<2>(messages[rcvMsgCount]).c_str(), record.value().data(), record.value().size()));
81+
// Poll these messages
82+
records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer);
83+
EXPECT_EQ(messages.size(), records.size());
8184

82-
++rcvMsgCount;
85+
// Copyable ConsumerRecord
86+
{
87+
auto recordsCopy = records;
88+
recordsCopy.clear();
89+
}
90+
91+
// Check messages
92+
std::size_t rcvMsgCount = 0;
93+
for (auto& record: records)
94+
{
95+
ASSERT_TRUE(rcvMsgCount < messages.size());
96+
97+
EXPECT_EQ(topic, record.topic());
98+
EXPECT_EQ(partition, record.partition());
99+
EXPECT_EQ(0, record.headers().size());
100+
EXPECT_EQ(std::get<1>(messages[rcvMsgCount]).size(), record.key().size());
101+
EXPECT_EQ(0, std::memcmp(std::get<1>(messages[rcvMsgCount]).c_str(), record.key().data(), record.key().size()));
102+
EXPECT_EQ(std::get<2>(messages[rcvMsgCount]).size(), record.value().size());
103+
EXPECT_EQ(0, std::memcmp(std::get<2>(messages[rcvMsgCount]).c_str(), record.value().data(), record.value().size()));
104+
105+
++rcvMsgCount;
106+
}
107+
108+
// Close the consumer
109+
consumer.close();
83110
}
84111

85-
// Close the consumer
86-
consumer.close();
112+
// All brokers' state should be "DOWN"
113+
for (const auto& brokerState: brokersState)
114+
{
115+
EXPECT_EQ("DOWN", brokerState.second);
116+
}
87117
}
88118

89119
TEST(KafkaConsumer, PollWithHeaders)

0 commit comments

Comments (0)