@@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
33
33
(* nft_set_ext_flags (& rbe -> ext ) & NFT_SET_ELEM_INTERVAL_END );
34
34
}
35
35
36
/* True if @rbe marks the start of an interval, i.e. its extension does
 * not carry NFT_SET_ELEM_INTERVAL_END (the complement of
 * nft_rbtree_interval_end()). Introduced so callers read
 * "interval_start(x)" instead of "!interval_end(x)".
 */
static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}
40
+
36
41
static bool nft_rbtree_equal (const struct nft_set * set , const void * this ,
37
42
const struct nft_rbtree_elem * interval )
38
43
{
@@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
64
69
if (interval &&
65
70
nft_rbtree_equal (set , this , interval ) &&
66
71
nft_rbtree_interval_end (rbe ) &&
67
- ! nft_rbtree_interval_end (interval ))
72
+ nft_rbtree_interval_start (interval ))
68
73
continue ;
69
74
interval = rbe ;
70
75
} else if (d > 0 )
@@ -89,7 +94,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
89
94
90
95
if (set -> flags & NFT_SET_INTERVAL && interval != NULL &&
91
96
nft_set_elem_active (& interval -> ext , genmask ) &&
92
- ! nft_rbtree_interval_end (interval )) {
97
+ nft_rbtree_interval_start (interval )) {
93
98
* ext = & interval -> ext ;
94
99
return true;
95
100
}
@@ -208,8 +213,43 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
208
213
u8 genmask = nft_genmask_next (net );
209
214
struct nft_rbtree_elem * rbe ;
210
215
struct rb_node * parent , * * p ;
216
+ bool overlap = false;
211
217
int d ;
212
218
219
+ /* Detect overlaps as we descend the tree. Set the flag in these cases:
220
+ *
221
+ * a1. |__ _ _? >|__ _ _ (insert start after existing start)
222
+ * a2. _ _ __>| ?_ _ __| (insert end before existing end)
223
+ * a3. _ _ ___| ?_ _ _>| (insert end after existing end)
224
+ * a4. >|__ _ _ _ _ __| (insert start before existing end)
225
+ *
226
+ * and clear it later on, as we eventually reach the points indicated by
227
+ * '?' above, in the cases described below. We'll always meet these
228
+ * later, locally, due to tree ordering, and overlaps for the intervals
229
+ * that are the closest together are always evaluated last.
230
+ *
231
+ * b1. |__ _ _! >|__ _ _ (insert start after existing end)
232
+ * b2. _ _ __>| !_ _ __| (insert end before existing start)
233
+ * b3. !_____>| (insert end after existing start)
234
+ *
235
+ * Case a4. resolves to b1.:
236
+ * - if the inserted start element is the leftmost, because the '0'
237
+ * element in the tree serves as end element
238
+ * - otherwise, if an existing end is found. Note that end elements are
239
+ * always inserted after corresponding start elements.
240
+ *
241
+ * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
242
+ * in that order.
243
+ *
244
+ * The flag is also cleared in two special cases:
245
+ *
246
+ * b4. |__ _ _!|<_ _ _ (insert start right before existing end)
247
+ * b5. |__ _ >|!__ _ _ (insert end right after existing start)
248
+ *
249
+ * which always happen as last step and imply that no further
250
+ * overlapping is possible.
251
+ */
252
+
213
253
parent = NULL ;
214
254
p = & priv -> root .rb_node ;
215
255
while (* p != NULL ) {
@@ -218,17 +258,42 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
218
258
d = memcmp (nft_set_ext_key (& rbe -> ext ),
219
259
nft_set_ext_key (& new -> ext ),
220
260
set -> klen );
221
- if (d < 0 )
261
+ if (d < 0 ) {
222
262
p = & parent -> rb_left ;
223
- else if (d > 0 )
263
+
264
+ if (nft_rbtree_interval_start (new )) {
265
+ overlap = nft_rbtree_interval_start (rbe ) &&
266
+ nft_set_elem_active (& rbe -> ext ,
267
+ genmask );
268
+ } else {
269
+ overlap = nft_rbtree_interval_end (rbe ) &&
270
+ nft_set_elem_active (& rbe -> ext ,
271
+ genmask );
272
+ }
273
+ } else if (d > 0 ) {
224
274
p = & parent -> rb_right ;
225
- else {
275
+
276
+ if (nft_rbtree_interval_end (new )) {
277
+ overlap = nft_rbtree_interval_end (rbe ) &&
278
+ nft_set_elem_active (& rbe -> ext ,
279
+ genmask );
280
+ } else if (nft_rbtree_interval_end (rbe ) &&
281
+ nft_set_elem_active (& rbe -> ext , genmask )) {
282
+ overlap = true;
283
+ }
284
+ } else {
226
285
if (nft_rbtree_interval_end (rbe ) &&
227
- ! nft_rbtree_interval_end (new )) {
286
+ nft_rbtree_interval_start (new )) {
228
287
p = & parent -> rb_left ;
229
- } else if (!nft_rbtree_interval_end (rbe ) &&
288
+
289
+ if (nft_set_elem_active (& rbe -> ext , genmask ))
290
+ overlap = false;
291
+ } else if (nft_rbtree_interval_start (rbe ) &&
230
292
nft_rbtree_interval_end (new )) {
231
293
p = & parent -> rb_right ;
294
+
295
+ if (nft_set_elem_active (& rbe -> ext , genmask ))
296
+ overlap = false;
232
297
} else if (nft_set_elem_active (& rbe -> ext , genmask )) {
233
298
* ext = & rbe -> ext ;
234
299
return - EEXIST ;
@@ -237,6 +302,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
237
302
}
238
303
}
239
304
}
305
+
306
+ if (overlap )
307
+ return - ENOTEMPTY ;
308
+
240
309
rb_link_node_rcu (& new -> node , parent , p );
241
310
rb_insert_color (& new -> node , & priv -> root );
242
311
return 0 ;
@@ -317,10 +386,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
317
386
parent = parent -> rb_right ;
318
387
else {
319
388
if (nft_rbtree_interval_end (rbe ) &&
320
- ! nft_rbtree_interval_end (this )) {
389
+ nft_rbtree_interval_start (this )) {
321
390
parent = parent -> rb_left ;
322
391
continue ;
323
- } else if (! nft_rbtree_interval_end (rbe ) &&
392
+ } else if (nft_rbtree_interval_start (rbe ) &&
324
393
nft_rbtree_interval_end (this )) {
325
394
parent = parent -> rb_right ;
326
395
continue ;