@@ -8,9 +8,6 @@ describe('MessageCache', () => {
         .fill()
         .map((_, i) => ({ topic: 'topic', partition: i % 3, number: i }));

-    beforeEach(() => {
-    });
-
     describe("with concurrency", () => {
         let cache;
         beforeEach(() => {
@@ -37,6 +34,31 @@ describe('MessageCache', () => {
             expect(receivedMessages.slice(61, 30).every((msg, i) => msg.partition === receivedMessages[60].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
         });

+        it('caches messages and retrieves N of them', () => {
+            const msgs = messages.slice(0, 90);
+            cache.addMessages(msgs);
+
+            const receivedMessages = [];
+            let nextIdx = -1;
+            const expectedFetchedSizes = [11, 11, 8];
+            for (let i = 0; i < (90 / 11); i++) {
+                /* We choose to fetch 11 messages together rather than 10 so that we can test the case where
+                 * remaining messages > 0 but less than requested size. */
+                const next = cache.nextN(nextIdx, 11);
+                /* There are 30 messages per partition, the first fetch will get 11, the second 11, and the last one
+                 * 8, and then it repeats for each partition. */
+                expect(next.length).toBe(expectedFetchedSizes[i % 3]);
+                expect(next).not.toBeNull();
+                receivedMessages.push(...next);
+                nextIdx = next.index;
+            }
+
+            /* Results are on a per-partition basis and well-ordered */
+            expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
+            expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
+            expect(receivedMessages.slice(61, 30).every((msg, i) => msg.partition === receivedMessages[60].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
+        });
+
         it('does not allow fetching more than 1 message at a time', () => {
             const msgs = messages.slice(0, 90);
             cache.addMessages(msgs);
@@ -119,8 +141,30 @@ describe('MessageCache', () => {
                 nextIdxs = [next0.index, next1.index];
             }

-            /* Results are on a zig-zag basis. */
-            expect(receivedMessages.every((msg, i) => msg.number === receivedMessages.number));
+            expect(receivedMessages.length).toBe(60);
+            expect(receivedMessages.filter(msg => msg.partition === 0).length).toBe(30);
+            expect(receivedMessages.filter(msg => msg.partition === 1).length).toBe(30);
+        });
+
+        it('caches messages and retrieves N of them 2-at-a-time', () => {
+            const msgs = messages.slice(0, 90).filter(msg => msg.partition !== 3);
+            cache.addMessages(msgs);
+
+            const receivedMessages = [];
+            let nextIdxs = [-1, -1];
+            for (let i = 0; i < 30 / 11; i++) {
+                const next0 = cache.nextN(nextIdxs[0], 11);
+                const next1 = cache.nextN(nextIdxs[1], 11);
+                expect(next0).not.toBeNull();
+                expect(next1).not.toBeNull();
+                receivedMessages.push(...next0);
+                receivedMessages.push(...next1);
+                nextIdxs = [next0.index, next1.index];
+            }
+
+            expect(receivedMessages.length).toBe(60);
+            expect(receivedMessages.filter(msg => msg.partition === 0).length).toBe(30);
+            expect(receivedMessages.filter(msg => msg.partition === 1).length).toBe(30);
         });

         it('does not allow fetching more than 2 message at a time', () => {
@@ -141,6 +185,25 @@ describe('MessageCache', () => {
             expect(next).not.toBeNull();
         });

+
+        it('does not allow fetching more than 2 message sets at a time', () => {
+            const msgs = messages.slice(0, 90);
+            cache.addMessages(msgs);
+
+            let next = cache.nextN(-1, 11);
+            let savedIndex = next.index;
+            expect(next).not.toBeNull();
+            next = cache.nextN(-1, 11);
+            expect(next).not.toBeNull();
+            next = cache.nextN(-1, 11);
+            expect(next).toBeNull();
+            expect(cache.pendingSize()).toBe(2);
+
+            // Fetch after returning index works.
+            next = cache.nextN(savedIndex, 11);
+            expect(next).not.toBeNull();
+        });
+
         it('stops fetching from stale partition', () => {
             const msgs = messages.slice(0, 90);
             cache.addMessages(msgs);
@@ -163,6 +226,29 @@ describe('MessageCache', () => {
             expect(receivedMessages).toEqual(expect.arrayContaining(msgs.slice(0, 3)));
         });

+        it('stops fetching message sets from stale partition', () => {
+            const msgs = messages.slice(0, 90);
+            cache.addMessages(msgs);
+
+            const receivedMessages = [];
+            let nextIdx = -1;
+            for (let i = 0; i < 3; i++) {
+                const next = cache.nextN(nextIdx, 11);
+                expect(next).not.toBeNull();
+                receivedMessages.push(...next);
+                nextIdx = next.index;
+                cache.markStale([{ topic: next[0].topic, partition: next[0].partition }]);
+            }
+
+            // We should not be able to get anything more.
+            expect(cache.nextN(nextIdx, 11)).toBeNull();
+            // Nothing should be pending, we've returned everything.
+            expect(cache.pendingSize()).toBe(0);
+            // The first [11, 11, 11] messages from different toppars.
+            expect(receivedMessages.length).toBe(33);
+            expect(receivedMessages).toEqual(expect.arrayContaining(msgs.slice(0, 33)));
+        });
+
         it('one slow processing message should not slow down others', () => {
             const msgs = messages.slice(0, 90);
             cache.addMessages(msgs);
@@ -188,6 +274,31 @@ describe('MessageCache', () => {
             expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
         });

+        it('one slow processing message set should not slow down others', () => {
+            const msgs = messages.slice(0, 90);
+            cache.addMessages(msgs);
+
+            const receivedMessages = [];
+            let nextIdx = -1;
+            const slowMsg = cache.nextN(nextIdx, 11);
+            for (let i = 0; i < 60 / 11; i++) { /* 60 - for non-partition 0 msgs */
+                const next = cache.nextN(nextIdx, 11);
+                expect(next).not.toBeNull();
+                receivedMessages.push(...next);
+                nextIdx = next.index;
+            }
+
+
+            // We should not be able to get anything more.
+            expect(cache.nextN(nextIdx, 11)).toBeNull();
+            // The slowMsg should be pending.
+            expect(cache.pendingSize()).toBe(1);
+
+            /* Messages should be partition-wise and well-ordered. */
+            expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
+            expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy();
+        });
+
         it('should not be able to handle cache-clearance in the middle of processing', () => {
             const msgs = messages.slice(0, 90);
             cache.addMessages(msgs);
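
Note: the loop these tests repeat (fetch a batch, process it, hand the batch back via its index, and mark a partition stale on failure) can be summarized as a minimal sketch. This assumes only the MessageCache API shape implied by the tests above (addMessages, nextN(lastIndex, n), markStale, pendingSize); the require path and the processBatch callback are hypothetical and not part of this change.

/* Minimal consumption-loop sketch, assuming the API shape shown in the tests above. */
const { MessageCache } = require('../lib/kafkajs/_consumer_cache'); // hypothetical path

function drainCache(cache, batchSize, processBatch) {
    let lastIdx = -1; // -1 means there is no previous batch to return
    let batch;
    // nextN() returns up to batchSize messages from a single partition (with an
    // `index` property) or null when nothing is available; passing lastIdx hands
    // back ownership of the previous batch so its slot can be reused.
    while ((batch = cache.nextN(lastIdx, batchSize)) !== null) {
        try {
            processBatch(batch);
        } catch (e) {
            // On failure, stop fetching further cached messages from this partition.
            cache.markStale([{ topic: batch[0].topic, partition: batch[0].partition }]);
        }
        lastIdx = batch.index;
    }
}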