@@ -31,30 +31,30 @@ public final class RDKafkaClient: Sendable {
     }

     /// Handle for the C library's Kafka instance.
-    private let kafkaHandle: OpaquePointer
+    private let kafkaHandle: SendableOpaquePointer
     /// A logger.
     private let logger: Logger

     /// `librdkafka`'s `rd_kafka_queue_t` that events are received on.
-    private let queue: OpaquePointer
+    private let queueHandle: SendableOpaquePointer

     // Use factory method to initialize
     private init(
         type: ClientType,
-        kafkaHandle: OpaquePointer,
+        kafkaHandle: SendableOpaquePointer,
         logger: Logger
     ) {
         self.kafkaHandle = kafkaHandle
         self.logger = logger
-        self.queue = rd_kafka_queue_get_main(self.kafkaHandle)
+        self.queueHandle = .init(rd_kafka_queue_get_main(self.kafkaHandle.pointer))

-        rd_kafka_set_log_queue(self.kafkaHandle, self.queue)
+        rd_kafka_set_log_queue(self.kafkaHandle.pointer, self.queueHandle.pointer)
     }

     deinit {
         // Loose reference to librdkafka's event queue
-        rd_kafka_queue_destroy(self.queue)
-        rd_kafka_destroy(kafkaHandle)
+        rd_kafka_queue_destroy(self.queueHandle.pointer)
+        rd_kafka_destroy(kafkaHandle.pointer)
     }

     /// Factory method creating a new instance of a ``RDKafkaClient``.
@@ -87,7 +87,8 @@ public final class RDKafkaClient: Sendable {
             throw KafkaError.client(reason: errorString)
         }

-        return RDKafkaClient(type: type, kafkaHandle: handle, logger: logger)
+        let kafkaHandle = SendableOpaquePointer(handle)
+        return RDKafkaClient(type: type, kafkaHandle: kafkaHandle, logger: logger)
     }

     /// Produce a message to the Kafka cluster.
@@ -212,7 +213,7 @@ public final class RDKafkaClient: Sendable {
             assert(arguments.count == size)

             return rd_kafka_produceva(
-                self.kafkaHandle,
+                self.kafkaHandle.pointer,
                 arguments,
                 arguments.count
             )
@@ -304,7 +305,7 @@ public final class RDKafkaClient: Sendable {
         events.reserveCapacity(maxEvents)

         for _ in 0..<maxEvents {
-            let event = rd_kafka_queue_poll(self.queue, 0)
+            let event = rd_kafka_queue_poll(self.queueHandle.pointer, 0)
             defer { rd_kafka_event_destroy(event) }

             let rdEventType = rd_kafka_event_type(event)
@@ -437,7 +438,7 @@ public final class RDKafkaClient: Sendable {
     /// - Returns: A ``KafkaConsumerMessage`` or `nil` if there are no new messages.
     /// - Throws: A ``KafkaError`` if the received message is an error message or malformed.
     func consumerPoll() throws -> KafkaConsumerMessage? {
-        guard let messagePointer = rd_kafka_consumer_poll(self.kafkaHandle, 0) else {
+        guard let messagePointer = rd_kafka_consumer_poll(self.kafkaHandle.pointer, 0) else {
             // No error, there might be no more messages
             return nil
         }
@@ -460,7 +461,7 @@ public final class RDKafkaClient: Sendable {
     /// - Parameter topicPartitionList: Pointer to a list of topics + partition pairs.
     func subscribe(topicPartitionList: RDKafkaTopicPartitionList) throws {
         try topicPartitionList.withListPointer { pointer in
-            let result = rd_kafka_subscribe(self.kafkaHandle, pointer)
+            let result = rd_kafka_subscribe(self.kafkaHandle.pointer, pointer)
             if result != RD_KAFKA_RESP_ERR_NO_ERROR {
                 throw KafkaError.rdKafkaError(wrapping: result)
             }
@@ -471,7 +472,7 @@ public final class RDKafkaClient: Sendable {
     /// - Parameter topicPartitionList: Pointer to a list of topics + partition pairs.
     func assign(topicPartitionList: RDKafkaTopicPartitionList) throws {
         try topicPartitionList.withListPointer { pointer in
-            let result = rd_kafka_assign(self.kafkaHandle, pointer)
+            let result = rd_kafka_assign(self.kafkaHandle.pointer, pointer)
             if result != RD_KAFKA_RESP_ERR_NO_ERROR {
                 throw KafkaError.rdKafkaError(wrapping: result)
             }
@@ -508,7 +509,7 @@ public final class RDKafkaClient: Sendable {

         let error = changesList.withListPointer { listPointer in
             return rd_kafka_commit(
-                self.kafkaHandle,
+                self.kafkaHandle.pointer,
                 listPointer,
                 1 // async = true
             )
@@ -551,9 +552,9 @@ public final class RDKafkaClient: Sendable {

         changesList.withListPointer { listPointer in
             rd_kafka_commit_queue(
-                self.kafkaHandle,
+                self.kafkaHandle.pointer,
                 listPointer,
-                self.queue,
+                self.queueHandle.pointer,
                 nil,
                 opaquePointer
             )
@@ -572,7 +573,7 @@ public final class RDKafkaClient: Sendable {
         let queue = DispatchQueue(label: "com.swift-server.swift-kafka.flush")
         try await withCheckedThrowingContinuation { (continuation: CheckedContinuation<Void, Error>) in
             queue.async {
-                let error = rd_kafka_flush(self.kafkaHandle, timeoutMilliseconds)
+                let error = rd_kafka_flush(self.kafkaHandle.pointer, timeoutMilliseconds)
                 if error != RD_KAFKA_RESP_ERR_NO_ERROR {
                     continuation.resume(throwing: KafkaError.rdKafkaError(wrapping: error))
                 } else {
@@ -587,7 +588,7 @@ public final class RDKafkaClient: Sendable {
     ///
     /// Make sure to run poll loop until ``RDKafkaClient/consumerIsClosed`` returns `true`.
     func consumerClose() throws {
-        let result = rd_kafka_consumer_close_queue(self.kafkaHandle, self.queue)
+        let result = rd_kafka_consumer_close_queue(self.kafkaHandle.pointer, self.queueHandle.pointer)
         let kafkaError = rd_kafka_error_code(result)
         if kafkaError != RD_KAFKA_RESP_ERR_NO_ERROR {
             throw KafkaError.rdKafkaError(wrapping: kafkaError)
@@ -596,14 +597,14 @@ public final class RDKafkaClient: Sendable {

     /// Returns `true` if the underlying `librdkafka` consumer is closed.
     var isConsumerClosed: Bool {
-        rd_kafka_consumer_closed(self.kafkaHandle) == 1
+        rd_kafka_consumer_closed(self.kafkaHandle.pointer) == 1
     }

     /// Scoped accessor that enables safe access to the pointer of the client's Kafka handle.
     /// - Warning: Do not escape the pointer from the closure for later use.
     /// - Parameter body: The closure will use the Kafka handle pointer.
     @discardableResult
     func withKafkaHandlePointer<T>(_ body: (OpaquePointer) throws -> T) rethrows -> T {
-        return try body(self.kafkaHandle)
+        return try body(self.kafkaHandle.pointer)
     }
 }
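
Note: the diff references a `SendableOpaquePointer` wrapper whose definition is not part of these hunks. Judging only from how it is used here (an `init(_:)` taking an `OpaquePointer` and a `pointer` property for handing the raw handle back to `librdkafka`), a minimal sketch could look like the following; the actual type in the package may differ in detail.

// Hypothetical sketch of the wrapper assumed by the diff above.
// Its only job is to carry a non-Sendable C handle inside a Sendable class,
// so the Sendable conformance is deliberately unchecked.
final class SendableOpaquePointer: @unchecked Sendable {
    /// The wrapped C handle (e.g. the `rd_kafka_t` or `rd_kafka_queue_t` pointer).
    let pointer: OpaquePointer

    init(_ pointer: OpaquePointer) {
        self.pointer = pointer
    }
}

The wrapper adds no safety of its own; it just moves the raw `OpaquePointer` behind an `@unchecked Sendable` type so `RDKafkaClient` can keep its `Sendable` conformance under strict concurrency checking, which is why every call site now reads the handle through `.pointer`.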