@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
176
176
*/
177
177
static inline bool xa_is_err (const void * entry )
178
178
{
179
- return unlikely (xa_is_internal (entry ));
179
+ return unlikely (xa_is_internal (entry ) &&
180
+ entry >= xa_mk_internal (- MAX_ERRNO ));
180
181
}
181
182
182
183
/**
@@ -286,7 +287,6 @@ struct xarray {
286
287
*/
287
288
#define DEFINE_XARRAY_ALLOC (name ) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
288
289
289
- void xa_init_flags (struct xarray * , gfp_t flags );
290
290
void * xa_load (struct xarray * , unsigned long index );
291
291
void * xa_store (struct xarray * , unsigned long index , void * entry , gfp_t );
292
292
void * xa_erase (struct xarray * , unsigned long index );
@@ -303,6 +303,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
303
303
unsigned long max , unsigned int n , xa_mark_t );
304
304
void xa_destroy (struct xarray * );
305
305
306
+ /**
307
+ * xa_init_flags() - Initialise an empty XArray with flags.
308
+ * @xa: XArray.
309
+ * @flags: XA_FLAG values.
310
+ *
311
+ * If you need to initialise an XArray with special flags (eg you need
312
+ * to take the lock from interrupt context), use this function instead
313
+ * of xa_init().
314
+ *
315
+ * Context: Any context.
316
+ */
317
+ static inline void xa_init_flags (struct xarray * xa , gfp_t flags )
318
+ {
319
+ spin_lock_init (& xa -> xa_lock );
320
+ xa -> xa_flags = flags ;
321
+ xa -> xa_head = NULL ;
322
+ }
323
+
306
324
/**
307
325
* xa_init() - Initialise an empty XArray.
308
326
* @xa: XArray.
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
342
360
}
343
361
344
362
/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you
 * want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start)			\
	for (index = start,						\
	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
	     entry;							\
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
390
+
391
/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you want
 * to skip or reprocess indices.  It is safe to modify the array during the
 * iteration.  At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
414
+
415
/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  The iteration will skip all entries in the array
 * which do not match @filter.  You may modify @index during the iteration
 * if you want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each(), and if you have
 * to unlock after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead.  The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter)			\
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter);	\
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
372
442
373
443
#define xa_trylock (xa ) spin_trylock(&(xa)->xa_lock)
374
444
#define xa_lock (xa ) spin_lock(&(xa)->xa_lock)
@@ -393,39 +463,12 @@ void *__xa_erase(struct xarray *, unsigned long index);
393
463
void * __xa_store (struct xarray * , unsigned long index , void * entry , gfp_t );
394
464
void * __xa_cmpxchg (struct xarray * , unsigned long index , void * old ,
395
465
void * entry , gfp_t );
466
+ int __xa_insert (struct xarray * , unsigned long index , void * entry , gfp_t );
396
467
int __xa_alloc (struct xarray * , u32 * id , u32 max , void * entry , gfp_t );
397
468
int __xa_reserve (struct xarray * , unsigned long index , gfp_t );
398
469
void __xa_set_mark (struct xarray * , unsigned long index , xa_mark_t );
399
470
void __xa_clear_mark (struct xarray * , unsigned long index , xa_mark_t );
400
471
401
- /**
402
- * __xa_insert() - Store this entry in the XArray unless another entry is
403
- * already present.
404
- * @xa: XArray.
405
- * @index: Index into array.
406
- * @entry: New entry.
407
- * @gfp: Memory allocation flags.
408
- *
409
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
410
- * This function is for users who don't care what the entry is, only that
411
- * one is present.
412
- *
413
- * Context: Any context. Expects xa_lock to be held on entry. May
414
- * release and reacquire xa_lock if the @gfp flags permit.
415
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
416
- * -ENOMEM if memory could not be allocated.
417
- */
418
- static inline int __xa_insert (struct xarray * xa , unsigned long index ,
419
- void * entry , gfp_t gfp )
420
- {
421
- void * curr = __xa_cmpxchg (xa , index , NULL , entry , gfp );
422
- if (!curr )
423
- return 0 ;
424
- if (xa_is_err (curr ))
425
- return xa_err (curr );
426
- return - EEXIST ;
427
- }
428
-
429
472
/**
430
473
* xa_store_bh() - Store this entry in the XArray.
431
474
* @xa: XArray.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
453
496
}
454
497
455
498
/**
456
 * xa_store_irq() - Store this entry in the XArray.
457
500
* @xa: XArray.
458
501
* @index: Index into array.
459
502
* @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
615
658
* @entry: New entry.
616
659
* @gfp: Memory allocation flags.
617
660
*
618
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
619
- * This function is for users who don't care what the entry is, only that
620
- * one is present .
661
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
662
+ * if no entry is present. Inserting will fail if a reserved entry is
663
+ * present, even though loading from this index will return NULL .
621
664
*
622
- * Context: Process context. Takes and releases the xa_lock.
623
- * May sleep if the @gfp flags permit.
665
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
666
+ * the @gfp flags permit.
624
667
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
625
668
* -ENOMEM if memory could not be allocated.
626
669
*/
627
670
static inline int xa_insert(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	int err;

	/* Take the plain xa_lock; __xa_insert() does the real work. */
	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}
681
+
682
+ /**
683
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
684
+ * already present.
685
+ * @xa: XArray.
686
+ * @index: Index into array.
687
+ * @entry: New entry.
688
+ * @gfp: Memory allocation flags.
689
+ *
690
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
691
+ * if no entry is present. Inserting will fail if a reserved entry is
692
+ * present, even though loading from this index will return NULL.
693
+ *
694
+ * Context: Any context. Takes and releases the xa_lock while
695
+ * disabling softirqs. May sleep if the @gfp flags permit.
696
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
697
+ * -ENOMEM if memory could not be allocated.
698
+ */
699
+ static inline int xa_insert_bh (struct xarray * xa , unsigned long index ,
700
+ void * entry , gfp_t gfp )
701
+ {
702
+ int err ;
703
+
704
+ xa_lock_bh (xa );
705
+ err = __xa_insert (xa , index , entry , gfp );
706
+ xa_unlock_bh (xa );
707
+
708
+ return err ;
709
+ }
710
+
711
+ /**
712
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
713
+ * already present.
714
+ * @xa: XArray.
715
+ * @index: Index into array.
716
+ * @entry: New entry.
717
+ * @gfp: Memory allocation flags.
718
+ *
719
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
720
+ * if no entry is present. Inserting will fail if a reserved entry is
721
+ * present, even though loading from this index will return NULL.
722
+ *
723
+ * Context: Process context. Takes and releases the xa_lock while
724
+ * disabling interrupts. May sleep if the @gfp flags permit.
725
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
726
+ * -ENOMEM if memory could not be allocated.
727
+ */
728
+ static inline int xa_insert_irq (struct xarray * xa , unsigned long index ,
729
+ void * entry , gfp_t gfp )
730
+ {
731
+ int err ;
732
+
733
+ xa_lock_irq (xa );
734
+ err = __xa_insert (xa , index , entry , gfp );
735
+ xa_unlock_irq (xa );
736
+
737
+ return err ;
636
738
}
637
739
638
740
/**
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
970
1072
(entry < xa_mk_sibling (XA_CHUNK_SIZE - 1 ));
971
1073
}
972
1074
973
/*
 * Post-commit ordering: RETRY is 256 and ZERO is 257, so that
 * xa_is_advanced() can use a single <= XA_RETRY_ENTRY range check.
 */
#define XA_RETRY_ENTRY		xa_mk_internal(256)
#define XA_ZERO_ENTRY		xa_mk_internal(257)
975
1077
976
1078
/**
977
1079
* xa_is_zero() - Is the entry a zero entry?
@@ -995,6 +1097,17 @@ static inline bool xa_is_retry(const void *entry)
995
1097
return unlikely (entry == XA_RETRY_ENTRY );
996
1098
}
997
1099
1100
+ /**
1101
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
1102
+ * @entry: Entry to be stored in the XArray.
1103
+ *
1104
+ * Return: %true if the entry cannot be stored by the normal API.
1105
+ */
1106
+ static inline bool xa_is_advanced (const void * entry )
1107
+ {
1108
+ return xa_is_internal (entry ) && (entry <= XA_RETRY_ENTRY );
1109
+ }
1110
+
998
1111
/**
999
1112
* typedef xa_update_node_t - A callback function from the XArray.
1000
1113
* @node: The node which is being processed
0 commit comments