forked from fsprojects/pulsar-client-dotnet
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathConsumerImpl.fs
More file actions
1731 lines (1538 loc) · 86 KB
/
ConsumerImpl.fs
File metadata and controls
1731 lines (1538 loc) · 86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
namespace Pulsar.Client.Api
open System.Collections
open System.Diagnostics
open System.Threading.Tasks
open FSharp.UMX
open System.Collections.Generic
open System
open Pulsar.Client.Schema
open Pulsar.Client.Transaction
open Pulsar.Client.Internal
open Pulsar.Client.Common
open Microsoft.Extensions.Logging
open System.IO
open ProtoBuf
open pulsar.proto
open System.Threading
open System.Timers
open ConsumerBase
open System.Threading.Channels
/// Outcome of decoding a raw payload: the raw bytes paired with the typed
/// value on success, or a validation error to report back to the broker.
type internal ParseResult<'T> =
    | ParseOk of struct(byte[]*'T)
    | ParseError of CommandAck.ValidationError
/// Kinds of periodic timer ticks delivered to the consumer mailbox.
type internal ConsumerTickType =
    | StatTick  // stats-reporting interval elapsed
    | ChunkTick // incomplete-chunked-message expiry interval elapsed
/// Messages processed by the consumer's single-reader mailbox loop.
/// Connection lifecycle events, broker pushes, application receive/ack
/// calls and periodic ticks are all serialized through this one channel.
type internal ConsumerMessage<'T> =
    | ConnectionOpened
    | ConnectionFailed of exn
    | ConnectionClosed of ClientCnx
    | ReachedEndOfTheTopic
    | MessageReceived of struct(RawMessage * ClientCnx)
    | Receive of ReceiveCallback<'T>
    | BatchReceive of ReceiveCallbacks<'T>
    | SendBatchByTimeout
    | Acknowledge of struct(MessageId * AckType * (Transaction * TaskCompletionSource<Unit>) option)
    | NegativeAcknowledge of MessageId
    | RedeliverUnacknowledged of RedeliverSet
    | RedeliverAllUnacknowledged of TaskCompletionSource<unit> option
    | SeekAsync of SeekType * TaskCompletionSource<unit>
    | HasMessageAvailable of TaskCompletionSource<bool>
    | ActiveConsumerChanged of bool
    | Close of TaskCompletionSource<unit>
    | Unsubscribe of TaskCompletionSource<unit>
    | Tick of ConsumerTickType
    | GetStats of TaskCompletionSource<ConsumerStats>
    | ReconsumeLater of Message<'T> * AckType * TimeStamp * TaskCompletionSource<unit>
    | CancelWaiter of Waiter<'T>
    | CancelBatchWaiter of BatchWaiter<'T>
    | AckReceipt of RequestId
    | AckError of RequestId * exn
    | ClearIncomingMessagesAndGetMessageNumber of TaskCompletionSource<int>
    | IncreaseAvailablePermits of int
/// Data required to construct a consumer for one topic: the resolved topic
/// name, its schema (with optional multi-version provider) and the
/// partition metadata fetched from the broker.
type internal ConsumerInitInfo<'T> =
    {
        TopicName: TopicName
        Schema: ISchema<'T>
        SchemaProvider: MultiVersionSchemaInfoProvider option
        Metadata: PartitionedTopicMetadata
    }
type internal ConsumerImpl<'T> (consumerConfig: ConsumerConfiguration<'T>, clientConfig: PulsarClientConfiguration,
topicName: TopicName, connectionPool: ConnectionPool,
partitionIndex: int, hasParentConsumer: bool, startMessageId: MessageId option,
startMessageRollbackDuration: TimeSpan, lookup: ILookupService,
createTopicIfDoesNotExist: bool, schema: ISchema<'T>,
schemaProvider: MultiVersionSchemaInfoProvider option,
interceptors: ConsumerInterceptors<'T>, cleanup: ConsumerImpl<'T> -> unit) as this =
[<Literal>]
let MAX_REDELIVER_UNACKNOWLEDGED = 1000  // max message ids sent per redeliver command
let _this = this :> IConsumer<'T>
let consumerName = getConsumerName consumerConfig.ConsumerName
let consumerId = Generators.getNextConsumerId()
// Messages received from the broker but not yet handed to the application.
let incomingMessages = Queue<Message<'T>>()
// Pending single-message and batch receive() callers waiting for data.
let waiters = LinkedList<Waiter<'T>>()
let batchWaiters = LinkedList<BatchWaiter<'T>>()
let subscribeTsc = TaskCompletionSource<unit>(TaskCreationOptions.RunContinuationsAsynchronously)
// In-flight transactional ack requests, keyed by broker request id.
let ackRequests = Dictionary<RequestId, MessageId * TxnId * TaskCompletionSource<unit>>()
let prefix = $"consumer({consumerId}, {consumerName}, {partitionIndex})"
let createConsumerStartTime = Stopwatch.GetTimestamp()
let mutable hasReachedEndOfTopic = false
// NOTE(review): name is misspelled ("avalable") — kept as-is; renaming would
// touch every block that reads it.
let mutable avalablePermits = 0
// When the start message is a chunked message, begin from its first chunk.
let mutable startMessageId =
    match startMessageId with
    | Some startMsgId ->
        match startMsgId.ChunkMessageIds with
        | Some chunkMessageIds when chunkMessageIds.Length > 0 ->
            Some(chunkMessageIds[0])
        | _ -> Some(startMsgId)
    | None -> None
let mutable lastMessageIdInBroker = MessageId.Earliest
let mutable lastDequeuedMessageId = MessageId.Earliest
// Holds the target message id while a seek operation is in progress.
let mutable duringSeek = None
let initialStartMessageId = startMessageId
let mutable incomingMessagesSize = 0L
let deadLettersProcessor = consumerConfig.DeadLetterProcessor topicName
let isDurable = consumerConfig.SubscriptionMode = SubscriptionMode.Durable
// Stats recording is disabled entirely when the stats interval is zero.
let stats =
    if clientConfig.StatsInterval = TimeSpan.Zero then
        ConsumerStatsImpl.CONSUMER_STATS_DISABLED
    else
        ConsumerStatsImpl(prefix) :> IConsumerStatsRecorder
let statTimer = new Timer()
let chunkTimer = new Timer()
/// Start periodic stats reporting via the mailbox; no-op when stats are disabled.
let startStatTimer () =
    if clientConfig.StatsInterval <> TimeSpan.Zero then
        statTimer.Interval <- clientConfig.StatsInterval.TotalMilliseconds
        statTimer.AutoReset <- true
        statTimer.Elapsed.Add(fun _ -> post this.Mb (Tick StatTick))
        statTimer.Start()
/// Start periodic expiry checks for incomplete chunked messages; no-op when disabled.
let startChunkTimer () =
    if consumerConfig.ExpireTimeOfIncompleteChunkedMessage <> TimeSpan.Zero then
        chunkTimer.Interval <- consumerConfig.ExpireTimeOfIncompleteChunkedMessage.TotalMilliseconds
        chunkTimer.AutoReset <- true
        chunkTimer.Elapsed.Add(fun _ -> post this.Mb (Tick ChunkTick))
        chunkTimer.Start()
// Key/value schemas decode key and payload together; None for plain schemas.
let keyValueProcessor = KeyValueProcessor.GetInstance schema
// Reconnection handler: reports connection open/failure into the mailbox and
// retries with the client's configured backoff.
let connectionHandler =
    ConnectionHandler(prefix,
                      connectionPool,
                      lookup,
                      topicName.CompleteTopicName,
                      (fun _ -> post this.Mb ConsumerMessage.ConnectionOpened),
                      (fun ex -> post this.Mb (ConsumerMessage.ConnectionFailed ex)),
                      Backoff({ BackoffConfig.Default with
                                    Initial = clientConfig.InitialBackoffInterval
                                    Max = clientConfig.MaxBackoffInterval }))
// Operations exposed to the transaction machinery, marshalled through the mailbox.
let consumerTxnOperations =
    {
        ClearIncomingMessagesAndGetMessageNumber = fun () ->
            postAndAsyncReply this.Mb ConsumerMessage.ClearIncomingMessagesAndGetMessageNumber |> Async.AwaitTask
        IncreaseAvailablePermits = fun permits ->
            post this.Mb (ConsumerMessage.IncreaseAvailablePermits permits)
    }
/// True when the broker still holds messages this consumer has not dequeued.
/// An EntryId of -1 means the broker reported no valid last entry, in which
/// case there is nothing to read regardless of relative position.
let hasMoreMessages (lastMessageIdInBroker: MessageId) (lastDequeuedMessage: MessageId) (inclusive: bool) =
    let brokerHasValidEntry = lastMessageIdInBroker.EntryId <> %(-1L)
    let brokerIsAhead =
        if inclusive then lastMessageIdInBroker >= lastDequeuedMessage
        else lastMessageIdInBroker > lastDequeuedMessage
    brokerHasValidEntry && brokerIsAhead
/// Accumulate flow permits released by message processing and flush a FLOW
/// command to the broker once enough have accumulated.
// NOTE(review): the flush threshold is half of the *current* queue length
// (incomingMessages.Count / 2), not the configured receiver queue size —
// confirm this is intentional.
let increaseAvailablePermits delta =
    avalablePermits <- avalablePermits + delta
    if avalablePermits >= incomingMessages.Count / 2 then
        this.SendFlowPermits avalablePermits
        avalablePermits <- 0
/// Clear the internal receiver queue and returns the message id of what was the 1st message in the queue that was not seen by the application
let clearReceiverQueue() =
    let nextMsg =
        if incomingMessages.Count > 0 then
            let nextMessageInQueue = incomingMessages.Dequeue().MessageId
            incomingMessagesSize <- 0L
            incomingMessages.Clear()
            Some nextMessageInQueue
        else
            None
    match duringSeek with
    | Some _ as seekMsgId ->
        // A seek is in flight: resume from the seek target.
        duringSeek <- None
        seekMsgId
    | None when isDurable ->
        // Durable subscription: the broker tracks the position; keep startMessageId.
        startMessageId
    | _ ->
        match nextMsg with
        | Some nextMessageInQueue ->
            // Resume just BEFORE the first unseen message (resubscribe is exclusive).
            let previousMessage =
                match nextMessageInQueue.Type with
                | Batch (index, acker) ->
                    // Get on the previous message within the current batch
                    { nextMessageInQueue with Type = Batch(index - %1, acker) }
                | MessageIdType.Single ->
                    // Get on previous message in previous entry
                    { nextMessageInQueue with EntryId = nextMessageInQueue.EntryId - %1L }
            Some previousMessage
        | None ->
            if lastDequeuedMessageId <> MessageId.Earliest then
                // If the queue was empty we need to restart from the message just after the last one that has been dequeued
                // in the past
                Some lastDequeuedMessageId
            else
                // No message was received or dequeued by this consumer. Next message would still be the startMessageId
                startMessageId
/// Ask the broker for the last message id of this topic/partition, retrying
/// with backoff while the connection is unavailable, bounded by the client's
/// operation timeout. Fails immediately with AlreadyClosedException when the
/// consumer is closing or closed; raises TimeoutException when the time
/// budget is exhausted before a connection becomes Ready.
let getLastMessageIdAsync() =
    let rec internalGetLastMessageIdAsync(backoff: Backoff, remainingTimeMs: int) =
        async {
            match connectionHandler.ConnectionState with
            | Ready clientCnx ->
                let requestId = Generators.getNextRequestId()
                let payload = Commands.newGetLastMessageId consumerId requestId
                try
                    let! response = clientCnx.SendAndWaitForReply requestId payload |> Async.AwaitTask
                    return response |> PulsarResponseType.GetLastMessageId
                with
                | Flatten ex ->
                    Log.Logger.LogError(ex, "{0} failed getLastMessageId", prefix)
                    return reraize ex
            | _ ->
                // Not connected: wait for the next backoff delay and retry
                // until the remaining time budget runs out.
                let nextDelay = Math.Min(backoff.Next(), remainingTimeMs)
                if nextDelay <= 0 then
                    return
                        // fixed typo: "withing" -> "within"
                        "Couldn't get the last message id within configured timeout"
                        |> TimeoutException
                        |> raise
                else
                    Log.Logger.LogWarning("{0} Could not get connection while GetLastMessageId -- Will try again in {1} ms",
                                          prefix, nextDelay)
                    do! Async.Sleep nextDelay
                    return! internalGetLastMessageIdAsync(backoff, remainingTimeMs - nextDelay)
        }
    match connectionHandler.ConnectionState with
    | Closing | Closed ->
        "Consumer is already closed"
        |> AlreadyClosedException
        |> Task.FromException<LastMessageIdResult>
    | _ ->
        // Backoff is capped at twice the operation timeout.
        let backoff = Backoff { BackoffConfig.Default with
                                    Initial = TimeSpan.FromMilliseconds(100.0)
                                    Max = (clientConfig.OperationTimeout + clientConfig.OperationTimeout)}
        internalGetLastMessageIdAsync(backoff, int clientConfig.OperationTimeout.TotalMilliseconds) |> Async.StartAsTask
// Redelivery plumbing: post ids back to the mailbox, notifying interceptors first.
let redeliverMessages messages =
    post this.Mb (RedeliverUnacknowledged messages)
let unAckedMessageRedeliver messages =
    interceptors.OnAckTimeoutSend(this, messages)
    redeliverMessages messages
let negativeAcksRedeliver messages =
    interceptors.OnNegativeAcksSend(this, messages)
    redeliverMessages messages
// Ack-timeout tracking is disabled when AckTimeout is zero; otherwise the
// tracker ticks at the smaller of AckTimeoutTickTime and AckTimeout.
let unAckedMessageTracker =
    if consumerConfig.AckTimeout > TimeSpan.Zero then
        if consumerConfig.AckTimeoutTickTime > TimeSpan.Zero then
            let tickDuration = if consumerConfig.AckTimeout > consumerConfig.AckTimeoutTickTime then consumerConfig.AckTimeoutTickTime else consumerConfig.AckTimeout
            UnAckedMessageTracker(prefix, consumerConfig.AckTimeout, tickDuration, unAckedMessageRedeliver) :> IUnAckedMessageTracker
        else
            UnAckedMessageTracker(prefix, consumerConfig.AckTimeout, consumerConfig.AckTimeout, unAckedMessageRedeliver) :> IUnAckedMessageTracker
    else
        UnAckedMessageTracker.UNACKED_MESSAGE_TRACKER_DISABLED
let negativeAcksTracker = NegativeAcksTracker(prefix, consumerConfig.NegativeAckRedeliveryDelay, negativeAcksRedeliver)
let getConnectionState() = connectionHandler.ConnectionState
let sendAckPayload (cnx: ClientCnx) payload = cnx.Send payload
// Acks are grouped/batched only for persistent topics.
let acksGroupingTracker =
    if topicName.IsPersistent then
        AcknowledgmentsGroupingTracker(prefix, consumerId, consumerConfig.AcknowledgementsGroupTime, getConnectionState, sendAckPayload) :> IAcknowledgmentsGroupingTracker
    else
        AcknowledgmentsGroupingTracker.NonPersistentAcknowledgmentGroupingTracker
// When a parent (partitioned/multi-topic) consumer exists, it owns unacked
// tracking, so this child consumer skips it.
let trackMessage msgId =
    if not hasParentConsumer then
        unAckedMessageTracker.Add msgId
let untrackMessage msgId =
    if not hasParentConsumer then
        unAckedMessageTracker.Remove msgId
let untrackMessagesTill msgId =
    if not hasParentConsumer then
        unAckedMessageTracker.RemoveMessagesTill msgId
    else
        zeroTask
/// Update trackers, stats, interceptors and the dead-letter processor for an
/// ack, then hand the ack to the grouping tracker for (eventual) sending.
let sendAcknowledge (messageId: MessageId) ackType properties =
    match ackType with
    | Individual ->
        match messageId.Type with
        | MessageIdType.Single ->
            untrackMessage messageId
            stats.IncrementNumAcksSent(1)
        | Batch (_, batch) ->
            // Individual ack of a batch id: untrack every index in the batch.
            let batchSize = batch.GetBatchSize()
            for i in 0..batchSize-1 do
                untrackMessage { messageId with Type = Batch(%i, batch) }
            stats.IncrementNumAcksSent(batchSize)
        interceptors.OnAcknowledge(this, messageId, null)
        deadLettersProcessor.RemoveMessage messageId
    | AckType.Cumulative ->
        interceptors.OnAcknowledgeCumulative(this, messageId, null)
        backgroundTask {
            let! count = untrackMessagesTill messageId
            stats.IncrementNumAcksSent(count)
        } |> ignore
    acksGroupingTracker.AddAcknowledgment(messageId, ackType, properties)
    // Consumer acknowledgment operation immediately succeeds. In any case, if we're not able to send ack to broker,
    // the messages will be re-delivered
/// Chunk bookkeeping: auto-ack a chunk id (queue-full eviction) or start
/// tracking it for ack-timeout redelivery.
let ackOrTrack msgId autoAck =
    if autoAck then
        Log.Logger.LogInformation("{0} Removing chunk message-id {1}", prefix, msgId)
        sendAcknowledge msgId Individual EmptyProperties
    else
        trackMessage msgId
let chunkedMessageTracker = ChunkedMessageTracker(prefix, consumerConfig.MaxPendingChunkedMessage,
                                                 consumerConfig.AutoAckOldestChunkedMessageOnQueueFull,
                                                 consumerConfig.ExpireTimeOfIncompleteChunkedMessage,
                                                 ackOrTrack)
/// Record an ack for one index of a batched entry. Returns true when every
/// index in the batch is now acked (the whole entry can be acked to the
/// broker), false while other indexes are still outstanding.
let markAckForBatchMessage (msgId: MessageId) ((batchIndex, batchAcker): BatchDetails) ackType properties =
    let isAllMsgsAcked =
        match ackType with
        | Individual ->
            batchAcker.AckIndividual(batchIndex)
        | AckType.Cumulative ->
            batchAcker.AckCumulative(batchIndex)
    let outstandingAcks = batchAcker.GetOutstandingAcks()
    let batchSize = batchAcker.GetBatchSize()
    if isAllMsgsAcked then
        if Log.Logger.IsEnabled LogLevel.Debug then
            Log.Logger.LogDebug("{0} can ack message acktype {1}, cardinality {2}, length {3}",
                                prefix, ackType, outstandingAcks, batchSize)
        true
    else
        match ackType with
        | AckType.Cumulative ->
            // Cumulatively ack everything before this batch, exactly once.
            if not batchAcker.PrevBatchCumulativelyAcked then
                sendAcknowledge msgId.PrevBatchMessageId ackType properties
                Log.Logger.LogDebug("{0} update PrevBatchCumulativelyAcked", prefix)
                batchAcker.PrevBatchCumulativelyAcked <- true
            interceptors.OnAcknowledgeCumulative(this, msgId, null)
        | Individual ->
            interceptors.OnAcknowledge(this, msgId, null)
        if Log.Logger.IsEnabled LogLevel.Debug then
            Log.Logger.LogDebug("{0} cannot ack message acktype {1}, cardinality {2}, length {3}",
                                prefix, ackType, outstandingAcks, batchSize)
        false
/// Send an ack bound to a transaction and register the pending request so the
/// broker's AckReceipt/AckError reply can complete [tcs]. For batch ids an
/// ack-set bit array marks which indexes are still un-acked (bit = true).
let doTransactionAcknowledgeForResponse ackType properties txnId tcs (messageId: MessageId) =
    match connectionHandler.ConnectionState with
    | Ready clientCnx ->
        let requestId = Generators.getNextRequestId()
        let command =
            match messageId.Type with
            | Batch (batchIndex, batchAcker) ->
                let ackSet =
                    match ackType with
                    | Cumulative ->
                        batchAcker.AckCumulative(batchIndex) |> ignore
                        // all indexes up to and including batchIndex are acked
                        let bitSet = BitArray(batchAcker.GetBatchSize(), true)
                        for i in 0 .. %batchIndex do
                            bitSet[i] <- false
                        bitSet
                    | Individual ->
                        let bitSet = BitArray(batchAcker.GetBatchSize(), true)
                        bitSet[%batchIndex] <- false
                        bitSet
                let allBitsAreZero =
                    let mutable result = true
                    let mutable i = 0
                    while result && i < ackSet.Length do
                        result <- not ackSet[i]
                        i <- i + 1
                    result
                let adjustedSet =
                    if allBitsAreZero then
                        // hack to conform Java
                        [||]
                    else
                        ackSet |> toLongArray
                Commands.newAck consumerId messageId.LedgerId messageId.EntryId ackType properties adjustedSet
                    None (Some txnId) (Some requestId) (batchAcker.GetBatchSize() |> Some)
            | _ ->
                Commands.newAck consumerId messageId.LedgerId messageId.EntryId ackType properties null
                    None (Some txnId) (Some requestId) None
        // Remember the request so AckReceipt/AckError can resolve the caller's tcs.
        ackRequests.Add(requestId, (messageId, txnId, tcs))
        match ackType with
        | Individual -> unAckedMessageTracker.Remove(messageId)
        | AckType.Cumulative -> unAckedMessageTracker.RemoveMessagesTill(messageId) |> ignore
        clientCnx.SendAndForget(command)
    | _ ->
        Log.Logger.LogWarning("{0} not ready, can't do transactionAcknowledge. State: {1}",
                              prefix, connectionHandler.ConnectionState)
        tcs.SetException(NotConnectedException("TransactionAcknowledge failed"))
/// Entry point for acknowledging a message id. Transactional acks take the
/// request/response path; batch ids are held until all indexes are acked
/// (optionally recording per-index acks); everything else is acked directly.
let trySendAcknowledge ackType properties (txnOption: (Transaction * TaskCompletionSource<unit>) option) (messageId: MessageId) =
    match txnOption with
    | Some (txn, tcs) ->
        doTransactionAcknowledgeForResponse ackType properties txn.Id tcs messageId
    | None ->
        match messageId.Type with
        | Batch batchDetails when not (markAckForBatchMessage messageId batchDetails ackType properties) ->
            if consumerConfig.BatchIndexAcknowledgmentEnabled then
                acksGroupingTracker.AddBatchIndexAcknowledgment(messageId, ackType, properties)
            // other messages in batch are still pending ack.
        | _ ->
            sendAcknowledge messageId ackType properties
            if Log.Logger.IsEnabled LogLevel.Debug then
                Log.Logger.LogDebug("{0} acknowledged message - {1}, acktype {2}", prefix, messageId, ackType)
/// True when entry id [idx] precedes the configured start message, i.e. an
/// entry the consumer must skip; the comparison is strict when the start
/// message itself should be included (ResetIncludeHead).
let isPriorEntryIndex idx =
    startMessageId
    |> Option.map (fun startMsgId ->
        if consumerConfig.ResetIncludeHead then idx < startMsgId.EntryId
        else idx <= startMsgId.EntryId)
    |> Option.defaultValue false
/// True when batch index [idx] precedes the batch position of the configured
/// start message. Non-batch start messages never filter by batch index.
let isPriorBatchIndex idx =
    match startMessageId with
    | None -> false
    | Some startMsgId ->
        match startMsgId.Type with
        | MessageIdType.Single -> false
        | Batch (batchIndex, _) when consumerConfig.ResetIncludeHead -> idx < batchIndex
        | Batch (batchIndex, _) -> idx <= batchIndex
/// True when [msgId] refers to the same ledger entry as the start message.
let isSameEntry (msgId: MessageId) =
    match startMessageId with
    | Some startMsgId ->
        startMsgId.LedgerId = msgId.LedgerId && startMsgId.EntryId = msgId.EntryId
    | None -> false
/// Extract the raw schema-version bytes, or null when no version is attached.
let getSchemaVersionBytes (schemaVersion: SchemaVersion option) =
    match schemaVersion with
    | Some version -> version.Bytes
    | None -> null
let clearDeadLetters() = deadLettersProcessor.ClearMessages()
/// Fail every in-flight transactional ack request (used on close).
let clearAckRequests() =
    for KeyValue(_, (_, _, tcs)) in ackRequests do
        stats.IncrementNumAcksFailed()
        tcs.SetException(MessageAcknowledgeException "Consumer was closed")
    ackRequests.Clear()
/// Normalize a message id to its single-message form for this partition.
let getNewIndividualMsgIdWithPartition messageId =
    { messageId with Type = MessageIdType.Single; Partition = partitionIndex; TopicName = %"" }
/// Offer a message to the dead-letter processor. The outer task resolves to
/// an inner task that is true when the message was routed to the DLQ (and
/// acked), false otherwise — including when the DLQ send itself failed.
let processPossibleToDLQ (messageId : MessageId) =
    let acknowledge = trySendAcknowledge Individual EmptyProperties None
    backgroundTask {
        try
            return! deadLettersProcessor.ProcessMessage(messageId, acknowledge)
        with Flatten ex ->
            Log.Logger.LogError(ex, "Failed to send DLQ message to {0} for message id {1}",
                                deadLettersProcessor.TopicName, messageId)
            return falseTask
    }
/// For each id scheduled for redelivery, first offer it to the DLQ; only ids
/// that did NOT go to the dead-letter topic are converted to MessageIdData
/// entries for the redeliver command.
let getRedeliveryMessageIdData ids =
    backgroundTask {
        let isDeadTasks = ResizeArray()
        for messageId in ids do
            let! isDeadTask = processPossibleToDLQ messageId
            isDeadTasks.Add (messageId, isDeadTask)
        let! results =
            isDeadTasks
            |> Seq.map(fun (messageId, isDeadTask) ->
                backgroundTask {
                    let! isDead = isDeadTask
                    return
                        if isDead then
                            None
                        else
                            MessageIdData(
                                Partition = messageId.Partition,
                                ledgerId = uint64 %messageId.LedgerId,
                                entryId = uint64 %messageId.EntryId)
                            |> Some
                }
            )
            |> Task.WhenAll
        return
            results
            |> Array.choose id
    }
// Queue helpers keep incomingMessagesSize (total payload bytes) in sync
// with the queue contents.
let enqueueMessage (msg: Message<'T>) =
    incomingMessagesSize <- incomingMessagesSize + msg.Data.LongLength
    incomingMessages.Enqueue(msg)
let dequeueMessage() =
    let msg = incomingMessages.Dequeue()
    incomingMessagesSize <- incomingMessagesSize - msg.Data.LongLength
    msg
/// True when the queued count/bytes satisfy the batch-receive policy.
let hasEnoughMessagesForBatchReceive() =
    hasEnoughMessagesForBatchReceive consumerConfig.BatchReceivePolicy incomingMessages.Count incomingMessagesSize
/// Record the event that one message has been processed by the application.
/// Periodically, it sends a Flow command to notify the broker that it can push more messages
let messageProcessed (msg: Message<'T>) =
    lastDequeuedMessageId <- msg.MessageId
    if consumerConfig.ReceiverQueueSize > 0 then
        //don't increase for zero queue consumer
        increaseAvailablePermits 1
    stats.UpdateNumMsgsReceived(msg.Data.Length)
    trackMessage msg.MessageId
/// Drain the incoming queue into a Messages batch until the batch-receive
/// policy limits (count/bytes) would be exceeded, then complete the channel.
let replyWithBatch (channel: TaskCompletionSource<Messages<'T>>) =
    let messages = Messages(consumerConfig.BatchReceivePolicy.MaxNumMessages, consumerConfig.BatchReceivePolicy.MaxNumBytes)
    let mutable shouldContinue = true
    while shouldContinue && incomingMessages.Count > 0 do
        let msgPeeked = incomingMessages.Peek()
        if messages.CanAdd(msgPeeked) then
            let msg = dequeueMessage()
            messageProcessed msg
            messages.Add(interceptors.BeforeConsume(this, msg))
        else
            shouldContinue <- false
    Log.Logger.LogDebug("{0} BatchFormed with size {1}", prefix, messages.Size)
    channel.SetResult messages
/// Remove messages whose ids are in [msgIds] from the head of the incoming
/// queue, returning how many were removed. The first dequeued message NOT in
/// the set has its id added to [msgIds] so it is redelivered rather than lost.
let removeExpiredMessagesFromQueue (msgIds: RedeliverSet) =
    if incomingMessages.Count > 0 then
        let peek = incomingMessages.Peek()
        if msgIds.Contains peek.MessageId then
            // try not to remove elements that are added while we remove
            let mutable finish = false
            let mutable messagesFromQueue = 0
            while not finish && incomingMessages.Count > 0 do
                let message = dequeueMessage()
                messagesFromQueue <- messagesFromQueue + 1
                if msgIds.Contains(message.MessageId) |> not then
                    msgIds.Add(message.MessageId) |> ignore
                    finish <- true
            messagesFromQueue
        else
            // first message is not expired, then no message is expired in queue.
            0
    else
        0
/// Complete one waiting receive() with [message], running permit/stat
/// bookkeeping and interceptors first.
let replyWithMessage (channel: TaskCompletionSource<Message<'T>>) message =
    messageProcessed message
    let interceptMsg = interceptors.BeforeConsume(this, message)
    channel.SetResult interceptMsg
/// Fail every pending receive()/batchReceive() waiter with [ex].
let failWaiters (ex: exn) =
    while waiters.Count > 0 do
        let waitingChannel = waiters |> dequeueWaiter
        waitingChannel.TrySetException ex |> ignore
    while batchWaiters.Count > 0 do
        let batchWaitingChannel = batchWaiters |> dequeueBatchWaiter
        batchWaitingChannel.TrySetException ex |> ignore
/// Shut down all background trackers/timers and deregister this consumer.
let closeConsumerTasks() =
    unAckedMessageTracker.Close()
    acksGroupingTracker.Close()
    clearDeadLetters()
    clearAckRequests()
    negativeAcksTracker.Close()
    connectionHandler.Close()
    interceptors.Close()
    statTimer.Stop()
    chunkTimer.Stop()
    cleanup(this)
/// Final shutdown: release resources and fail anyone still waiting.
let stopConsumer () =
    closeConsumerTasks()
    failWaiters <| AlreadyClosedException "Consumer is already closed"
    Log.Logger.LogInformation("{0} stopped", prefix)
/// Decrypt the payload when the broker attached encryption keys. Returns the
/// message unchanged when not encrypted, Error when decryption fails or no
/// MessageDecryptor is configured.
let decryptMessage (rawMessage:RawMessage) =
    if rawMessage.Metadata.EncryptionKeys.Length = 0 then
        Ok rawMessage
    else
        try
            match consumerConfig.MessageDecryptor with
            | Some msgCrypto ->
                let encryptionKeys = rawMessage.Metadata.EncryptionKeys
                let encMsg = EncryptedMessage(rawMessage.Payload.ToArray(), encryptionKeys,
                                              rawMessage.Metadata.EncryptionAlgo, rawMessage.Metadata.EncryptionParam)
                let decryptPayload = msgCrypto.Decrypt(encMsg)
                { rawMessage with Payload = new MemoryStream(decryptPayload) } |> Ok
            | None ->
                raise <| CryptoException "Message is encrypted, but no encryption configured"
        with ex ->
            Log.Logger.LogWarning(ex, "{0} Decryption exception {1}", prefix, rawMessage.MessageId)
            Error ex
/// Decompress a non-chunked payload with the codec named in the metadata.
/// Chunked messages pass through untouched here — they are handled after
/// all chunks are assembled.
let decompressMessage (rawMessage: RawMessage) isChunked =
    if isChunked then
        Ok rawMessage
    else
        try
            let compressionCodec = rawMessage.Metadata.CompressionType |> CompressionCodec.get
            let uncompressedPayload = compressionCodec.Decode(rawMessage.Metadata.UncompressedMessageSize, rawMessage.Payload)
            Ok { rawMessage with Payload = uncompressedPayload }
        with ex ->
            Log.Logger.LogError(ex, "{0} Decompression exception {1}", prefix, rawMessage.MessageId)
            Error ex
/// Ack the message with a validation error so the broker discards it instead
/// of redelivering a payload we cannot process.
let discardCorruptedMessage (msgId: MessageId) (clientCnx: ClientCnx) err =
    backgroundTask {
        let command = Commands.newAck consumerId msgId.LedgerId msgId.EntryId Individual
                          EmptyProperties null (Some err) None None None
        let! discardResult = clientCnx.Send command
        if discardResult then
            Log.Logger.LogInformation("{0} Message {1} was discarded due to {2}", prefix, msgId, err)
        else
            Log.Logger.LogWarning("{0} Unable to discard {1} due to {2}", prefix, msgId, err)
        stats.IncrementNumReceiveFailed()
    }
/// Resolve the decode function for a message: the versioned schema when a
/// multi-version provider is configured and the message carries a schema
/// version, otherwise the consumer's own schema. Falls back to a raising
/// decoder when the versioned schema cannot be fetched.
let getSchemaDecodeFunction (metadata: Metadata) =
    backgroundTask {
        // mutable workaround for unsupported let! inside let
        let mutable schemaDecodeFunction = Unchecked.defaultof<byte[]->'T>
        if schemaProvider.IsNone || metadata.SchemaVersion.IsNone then
            schemaDecodeFunction <- schema.Decode
        else
            let schemaVersion = metadata.SchemaVersion.Value
            try
                let! specificSchemaOption = schemaProvider.Value.GetSchemaByVersion(schema, schemaVersion)
                schemaDecodeFunction <-
                    match specificSchemaOption with
                    | Some specificSchema -> specificSchema.Decode
                    | None -> schema.Decode
            with Flatten ex ->
                Log.Logger.LogError(ex, "{0} Couldn't get schema by version", prefix)
                schemaDecodeFunction <- fun _ -> raise <| SchemaSerializationException ex.Message
        return schemaDecodeFunction
    }
/// Feed one chunk into the chunked-message tracker. Returns the assembled
/// payload and message id when this chunk completes the message; None while
/// more chunks are pending or when the chunk had to be discarded.
let processMessageChunk (rawMessage: RawMessage) msgId =
    match chunkedMessageTracker.GetContext(rawMessage.Metadata) with
    | Ok chunkedContext ->
        let compressionCodec = rawMessage.Metadata.CompressionType |> CompressionCodec.get
        match chunkedMessageTracker.MessageReceived(rawMessage, msgId, chunkedContext, compressionCodec) with
        | Some payloadAndMessageId ->
            Some payloadAndMessageId
        | None ->
            // message incomplete — request more data from the broker
            increaseAvailablePermits 1
            None
    | Error error ->
        // discard message if chunk is out-of-order
        Log.Logger.LogWarning("{0} {1} msgId = {2}", prefix, error, rawMessage.MessageId)
        increaseAvailablePermits 1
        let publishDateTime = convertToDateTime %rawMessage.Metadata.PublishTime
        // Ack (drop) chunks that have already exceeded the expiry window;
        // otherwise track them so ack-timeout redelivery can retry later.
        if consumerConfig.ExpireTimeOfIncompleteChunkedMessage > TimeSpan.Zero &&
           DateTime.UtcNow > publishDateTime.Add(consumerConfig.ExpireTimeOfIncompleteChunkedMessage) then
            sendAcknowledge rawMessage.MessageId Individual EmptyProperties
        else
            trackMessage rawMessage.MessageId
        None
/// Build a typed Message from an assembled payload and either hand it
/// straight to a waiting receive()/batchReceive() or enqueue it.
let handleSingleMessagePayload (rawMessage: RawMessage) msgId payload hasWaitingChannel hasWaitingBatchChannel schemaDecodeFunction =
    if duringSeek.IsSome || (isSameEntry(rawMessage.MessageId) && isPriorEntryIndex(rawMessage.MessageId.EntryId)) then
        // We need to discard entries that were prior to startMessageId
        Log.Logger.LogInformation("{0} Ignoring message from before the startMessageId: {1}", prefix, startMessageId)
    else
        let msgKey = rawMessage.MessageKey
        // Decoding is deferred: the typed value is produced lazily via getValue.
        let getValue () =
            if rawMessage.Metadata.NullValue then
                Unchecked.defaultof<'T>
            else
                keyValueProcessor
                |> Option.map (fun kvp -> kvp.DecodeKeyValue(msgKey, payload) :?> 'T)
                |> Option.defaultWith (fun () -> schemaDecodeFunction payload)
        let message = Message(
                          msgId,
                          payload,
                          %msgKey,
                          rawMessage.IsKeyBase64Encoded,
                          rawMessage.Properties,
                          EncryptionContext.FromMetadata rawMessage.Metadata,
                          getSchemaVersionBytes rawMessage.Metadata.SchemaVersion,
                          rawMessage.Metadata.SequenceId,
                          rawMessage.Metadata.OrderingKey,
                          rawMessage.Metadata.PublishTime,
                          rawMessage.Metadata.EventTime,
                          rawMessage.RedeliveryCount,
                          rawMessage.Metadata.ReplicatedFrom,
                          getValue
                      )
        if (rawMessage.RedeliveryCount >= deadLettersProcessor.MaxRedeliveryCount) then
            // redelivered too many times — candidate for the dead-letter topic
            deadLettersProcessor.AddMessage(message.MessageId, message)
        if hasWaitingChannel then
            let waitingChannel = waiters |> dequeueWaiter
            if (incomingMessages.Count = 0) then
                replyWithMessage waitingChannel message
            else
                // preserve ordering: the oldest queued message goes out first
                enqueueMessage message
                replyWithMessage waitingChannel <| dequeueMessage()
        else
            enqueueMessage message
            if hasWaitingBatchChannel && hasEnoughMessagesForBatchReceive() then
                let ch = batchWaiters |> dequeueBatchWaiter
                replyWithBatch ch
/// Dispatch a decrypted/decompressed raw message: skip duplicates, route
/// single messages (including chunk reassembly), and unpack batch entries.
let handleMessagePayload clientCnx (rawMessage: RawMessage) msgId hasWaitingChannel hasWaitingBatchChannel
                         isMessageUndecryptable isChunkedMessage schemaDecodeFunction =
    backgroundTask {
        let! isDuplicate = acksGroupingTracker.IsDuplicate msgId
        if isDuplicate then
            Log.Logger.LogWarning("{0} Ignoring message as it was already being acked earlier by same consumer {1}", prefix, msgId)
            increaseAvailablePermits rawMessage.Metadata.NumMessages
        else
            if isMessageUndecryptable || (rawMessage.Metadata.NumMessages = 1 && not rawMessage.Metadata.HasNumMessagesInBatch) then
                // right now, chunked messages are only supported by non-shared subscription
                if isChunkedMessage then
                    match processMessageChunk rawMessage msgId with
                    | Some (chunkedPayload, msgIdWithChunk) ->
                        handleSingleMessagePayload rawMessage msgIdWithChunk chunkedPayload hasWaitingChannel hasWaitingBatchChannel schemaDecodeFunction
                    | None ->
                        rawMessage.Payload.Dispose()
                else
                    let bytes = rawMessage.Payload.ToArray()
                    rawMessage.Payload.Dispose()
                    handleSingleMessagePayload rawMessage msgId bytes hasWaitingChannel hasWaitingBatchChannel schemaDecodeFunction
            elif rawMessage.Metadata.NumMessages > 0 then
                // handle batch message enqueuing; uncompressed payload has all messages in batch
                match wrapException (fun () ->
                    this.ReceiveIndividualMessagesFromBatch rawMessage schemaDecodeFunction) with
                | Ok () ->
                    // try respond to channel
                    if hasWaitingChannel && incomingMessages.Count > 0 then
                        let waitingChannel = waiters |> dequeueWaiter
                        replyWithMessage waitingChannel <| dequeueMessage()
                    elif hasWaitingBatchChannel && hasEnoughMessagesForBatchReceive() then
                        let ch = batchWaiters |> dequeueBatchWaiter
                        replyWithBatch ch
                | Error ex ->
                    Log.Logger.LogError(ex, "{0} Batch reading exception {1}", prefix, msgId)
                    do! discardCorruptedMessage msgId clientCnx CommandAck.ValidationError.BatchDeSerializeError
            else
                Log.Logger.LogWarning("{0} Received message with nonpositive numMessages: {1}", prefix, rawMessage.Metadata.NumMessages)
    }
/// Handle an application receive() call: deliver immediately when a message
/// is queued, otherwise park the callback as a waiter, with cancellation
/// routed back through the mailbox.
let receive (receiveCallback: ReceiveCallback<'T>) =
    Log.Logger.LogDebug("{0} Receive", prefix)
    let cancellationToken = receiveCallback.CancellationToken
    let channel = receiveCallback.MessageChannel
    if cancellationToken.IsCancellationRequested then
        channel.SetCanceled()
    else
        if incomingMessages.Count > 0 then
            replyWithMessage channel <| dequeueMessage()
        else
            let tokenRegistration =
                if cancellationToken.CanBeCanceled then
                    // `rec` lets the registration reference itself inside its own callback
                    let rec cancellationTokenRegistration =
                        cancellationToken.Register((fun () ->
                            Log.Logger.LogDebug("{0} receive cancelled", prefix)
                            post this.Mb (CancelWaiter(cancellationTokenRegistration, channel))
                        ), false) |> Some
                    cancellationTokenRegistration
                else
                    None
            waiters.AddLast(struct(tokenRegistration, channel)) |> ignore
            Log.Logger.LogDebug("{0} Receive waiting", prefix)
// Handles a batch Receive request from the user.
// Replies immediately when enough messages are buffered and no earlier batch
// waiter is queued ahead (FIFO fairness); otherwise parks the caller's channel
// in `batchWaiters` and arms the BatchReceivePolicy timeout.
let batchReceive (receiveCallbacks: ReceiveCallbacks<'T>) =
    Log.Logger.LogDebug("{0} BatchReceive", prefix)
    let cancellationToken = receiveCallbacks.CancellationToken
    let channel = receiveCallbacks.MessagesChannel
    if cancellationToken.IsCancellationRequested then
        // Caller already gave up before we processed the request.
        channel.SetCanceled()
    else
        if batchWaiters.Count = 0 && hasEnoughMessagesForBatchReceive() then
            // Fast path: enough buffered messages and nobody queued ahead of us.
            replyWithBatch channel
        else
            // Lets the timeout callback below detect that this waiter was
            // cancelled/removed before the timeout fired.
            let batchCts = new CancellationTokenSource()
            let registration =
                if cancellationToken.CanBeCanceled then
                    // `let rec` lets the registration callback capture the
                    // registration value itself, so the mailbox can dispose it
                    // when the waiter is cancelled.
                    let rec cancellationTokenRegistration =
                        cancellationToken.Register((fun () ->
                            Log.Logger.LogDebug("{0} batch receive cancelled", prefix)
                            // Cancellation is routed through the mailbox to keep
                            // all waiter-list mutation single-threaded.
                            post this.Mb (CancelBatchWaiter(batchCts, cancellationTokenRegistration, channel))
                        ), false) |> Some   // false = do not capture the SynchronizationContext
                    cancellationTokenRegistration
                else
                    None
            batchWaiters.AddLast(struct(batchCts, registration, channel)) |> ignore
            // Arm the batch timeout: after BatchReceivePolicy.Timeout the mailbox
            // flushes whatever is buffered, unless this waiter was cancelled first.
            asyncDelay
                consumerConfig.BatchReceivePolicy.Timeout
                (fun () ->
                    if not batchCts.IsCancellationRequested then
                        post this.Mb SendBatchByTimeout)
            Log.Logger.LogDebug("{0} BatchReceive waiting", prefix)
// Callbacks handed to the broker connection. Each broker event is simply
// forwarded into this consumer's mailbox, so that all consumer state is
// mutated only by the single mailbox-reader loop.
let consumerOperations =
    {
        MessageReceived = fun msg -> MessageReceived msg |> post this.Mb
        ReachedEndOfTheTopic = fun () -> ReachedEndOfTheTopic |> post this.Mb
        ActiveConsumerChanged = fun active -> ActiveConsumerChanged active |> post this.Mb
        ConnectionClosed = fun cnx -> ConnectionClosed cnx |> post this.Mb
        AckError = fun (requestId, error) -> AckError(requestId, error) |> post this.Mb
        AckReceipt = fun requestId -> AckReceipt requestId |> post this.Mb
    }
// Unbounded mailbox feeding the consumer's event loop.
// SingleReader = true: only the background loop reads from this channel;
// AllowSynchronousContinuations = true: continuations may run inline on write.
let mb =
    let options = UnboundedChannelOptions(SingleReader = true, AllowSynchronousContinuations = true)
    Channel.CreateUnbounded<ConsumerMessage<'T>>(options)
do (backgroundTask {
let mutable continueLoop = true
while continueLoop do
match! mb.Reader.ReadAsync() with
| ConsumerMessage.ConnectionOpened ->
match connectionHandler.ConnectionState with
| Ready clientCnx ->
Log.Logger.LogInformation("{0} starting subscribe to topic {1}", prefix, topicName)
clientCnx.AddConsumer(consumerId, consumerOperations)
let requestId = Generators.getNextRequestId()
startMessageId <- clearReceiverQueue()
clearDeadLetters()
let msgIdData =
if isDurable then
null
else
match startMessageId with
| None ->
Log.Logger.LogWarning("{0} Start messageId is missing", prefix)
null
| Some msgId ->
let data = MessageIdData(ledgerId = uint64 %msgId.LedgerId, entryId = uint64 %msgId.EntryId)
match msgId.Type with
| MessageIdType.Single ->
()
| Batch (index, _) ->
data.BatchIndex <- %index
data
// startMessageRollbackDurationInSec should be consider only once when consumer connects to first time
let startMessageRollbackDuration =
if startMessageRollbackDuration > TimeSpan.Zero && startMessageId = initialStartMessageId then
startMessageRollbackDuration
else
TimeSpan.Zero
let payload =
Commands.newSubscribe
topicName.CompleteTopicName consumerConfig.SubscriptionName
consumerId requestId consumerName consumerConfig.SubscriptionType
consumerConfig.SubscriptionInitialPosition consumerConfig.ReadCompacted msgIdData isDurable
startMessageRollbackDuration createTopicIfDoesNotExist consumerConfig.KeySharedPolicy
schema.SchemaInfo consumerConfig.PriorityLevel consumerConfig.ReplicateSubscriptionState
try
let! response = clientCnx.SendAndWaitForReply requestId payload
response |> PulsarResponseType.GetEmpty
this.ConsumerIsReconnectedToBroker()
connectionHandler.ResetBackoff()
let initialFlowCount = consumerConfig.ReceiverQueueSize
subscribeTsc.TrySetResult() |> ignore
if initialFlowCount <> 0 then
increaseAvailablePermits initialFlowCount
with Flatten ex ->
clientCnx.RemoveConsumer consumerId
Log.Logger.LogError(ex, "{0} failed to subscribe to topic", prefix)
match ex with
| _ when PulsarClientException.isRetriableError ex
&& (Stopwatch.GetElapsedTime(createConsumerStartTime) > clientConfig.OperationTimeout) ->
connectionHandler.ReconnectLater ex
| _ when not subscribeTsc.Task.IsCompleted ->
// unable to create new consumer, fail operation
connectionHandler.Failed()
subscribeTsc.SetException(ex)
stopConsumer()
continueLoop <- false
| :? TopicDoesNotExistException ->
// The topic was deleted after the consumer was created, and we're
// not allowed to recreate the topic. This can happen in few cases:
// * Regex consumer getting error after topic gets deleted
// * Regular consumer after topic is manually delete and with
// auto-topic-creation set to false
// No more retries are needed in this case.
connectionHandler.Failed()
closeConsumerTasks()
Log.Logger.LogWarning("{0} Closed consumer because topic does not exist anymore. {1}", prefix, ex.Message)
continueLoop <- false
| _ ->
// consumer was subscribed and connected but we got some error, keep trying
connectionHandler.ReconnectLater ex
| _ ->
Log.Logger.LogWarning("{0} connection opened but connection state is {1}", prefix, connectionHandler.ConnectionState)
| ConsumerMessage.MessageReceived (rawMessage, clientCnx) ->
let hasWaitingChannel = waiters.Count > 0
let hasWaitingBatchChannel = batchWaiters.Count > 0
let msgId = getNewIndividualMsgIdWithPartition rawMessage.MessageId
if Log.Logger.IsEnabled LogLevel.Debug then
Log.Logger.LogDebug("{0} MessageReceived {1} queueLength={2}, hasWaitingChannel={3}, hasWaitingBatchChannel={4}",
prefix, msgId, incomingMessages.Count, hasWaitingChannel, hasWaitingBatchChannel)
if rawMessage.CheckSumValid then
let! isDuplicate = acksGroupingTracker.IsDuplicate msgId
if isDuplicate |> not then
let! schemaDecodeFunction = getSchemaDecodeFunction rawMessage.Metadata
let isChunked = rawMessage.Metadata.NumChunks > 1 && consumerConfig.SubscriptionType <> SubscriptionType.Shared
match decryptMessage rawMessage with
| Ok decryptedMessage ->
if decryptedMessage.Payload.Length <= clientCnx.MaxMessageSize then
match decompressMessage decryptedMessage isChunked with
| Ok decompressedMessage ->
do! handleMessagePayload clientCnx
decompressedMessage msgId hasWaitingChannel hasWaitingBatchChannel false
isChunked schemaDecodeFunction
| Error _ ->
do! discardCorruptedMessage msgId clientCnx CommandAck.ValidationError.DecompressionError
else
do! discardCorruptedMessage msgId clientCnx CommandAck.ValidationError.UncompressedSizeCorruption
| Error _ ->
match consumerConfig.ConsumerCryptoFailureAction with
| ConsumerCryptoFailureAction.CONSUME ->
Log.Logger.LogWarning("{0} {1} Decryption failed. Consuming encrypted message.",
prefix, msgId)
do! handleMessagePayload clientCnx
rawMessage msgId hasWaitingChannel hasWaitingBatchChannel true
isChunked schemaDecodeFunction
| ConsumerCryptoFailureAction.DISCARD ->
Log.Logger.LogWarning("{0} {1}. Decryption failed. Discarding encrypted message.",
prefix, msgId)
do! discardCorruptedMessage msgId clientCnx CommandAck.ValidationError.DecryptionError
| ConsumerCryptoFailureAction.FAIL ->
Log.Logger.LogError("{0} {1}. Decryption failed. Failing encrypted message.",
prefix, msgId)
unAckedMessageTracker.Add msgId
| _ ->
failwith "Unknown ConsumerCryptoFailureAction"
else
Log.Logger.LogWarning("{0} Ignoring message as it was already being acked earlier by same consumer {1}", prefix, msgId)
increaseAvailablePermits rawMessage.Metadata.NumMessages
else
do! discardCorruptedMessage msgId clientCnx CommandAck.ValidationError.ChecksumMismatch
| ConsumerMessage.Receive receiveCallback ->
receive receiveCallback
| ConsumerMessage.Acknowledge (messageId, ackType, txnOption) ->
if Log.Logger.IsEnabled LogLevel.Debug then
Log.Logger.LogDebug("{0} Acknowledge {1} {2} {3}", prefix, messageId, ackType, txnOption)
trySendAcknowledge ackType EmptyProperties txnOption messageId
| ConsumerMessage.BatchReceive receiveCallbacks ->
batchReceive receiveCallbacks
| ConsumerMessage.SendBatchByTimeout ->
Log.Logger.LogDebug("{0} SendBatchByTimeout", prefix)
if batchWaiters.Count > 0 then
let ch = batchWaiters |> dequeueBatchWaiter
replyWithBatch ch
| ConsumerMessage.NegativeAcknowledge messageId ->
Log.Logger.LogDebug("{0} NegativeAcknowledge {1}", prefix, messageId)
negativeAcksTracker.Add(messageId) |> ignore
// Ensure the message is not redelivered for ack-timeout, since we did receive an "ack"
untrackMessage messageId
| ConsumerMessage.CancelWaiter waiter ->
if waiters.Remove waiter then
Log.Logger.LogDebug("{0} CancelWaiter, removed waiter", prefix)
let struct(ctrOpt, channel) = waiter
channel.SetCanceled()
ctrOpt |> Option.iter _.Dispose()
else
Log.Logger.LogDebug("{0} CancelWaiter, no waiter found", prefix)
| ConsumerMessage.CancelBatchWaiter batchWaiter ->
if batchWaiters.Remove batchWaiter then
Log.Logger.LogDebug("{0} CancelBatchWaiter, removed waiter", prefix)
let struct(batchCts, ctrOpt, channel) = batchWaiter
batchCts.Cancel()
batchCts.Dispose()
channel.SetCanceled()
ctrOpt |> Option.iter _.Dispose()
else
Log.Logger.LogDebug("{0} CancelBatchWaiter, no waiter found", prefix)
| ConsumerMessage.AckReceipt reqId ->
match ackRequests.TryGetValue(reqId) with
| true, (msgId, txnId, tcs) ->
ackRequests.Remove(reqId) |> ignore
tcs.SetResult()
Log.Logger.LogDebug("{0} MessageId : {1} has ack receipt by TxnId : {2}", prefix, msgId, txnId)