
Commit b6da940

mm, slab: add kerneldocs for common SLAB_ flags
We have many SLAB_ flags, but many are used only internally, by kunit tests or debugging subsystems cooperating with slab, or are set according to the slab_debug boot parameter. Create kerneldocs for the commonly used flags that may be passed to kmem_cache_create(). SLAB_TYPESAFE_BY_RCU already had a detailed description, so turn it into a kerneldoc. Add some details for SLAB_ACCOUNT, SLAB_RECLAIM_ACCOUNT and SLAB_HWCACHE_ALIGN. Reference them from the __kmem_cache_create_args() kerneldoc.

Signed-off-by: Vlastimil Babka <[email protected]>
Parent: b4b797d

2 files changed: +54, -20 lines

include/linux/slab.h

Lines changed: 41 additions & 19 deletions
@@ -77,7 +77,17 @@ enum _slab_flag_bits {
 #define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
 #define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
-/* Align objs on cache lines */
+/**
+ * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * Sufficiently large objects are aligned on a cache line boundary. For object
+ * sizes smaller than half of the cache line size, the alignment is half of the
+ * cache line size. In general, if the object size is smaller than 1/2^n of the
+ * cache line size, the alignment is adjusted to 1/2^n.
+ *
+ * If explicit alignment is also requested by the respective
+ * &struct kmem_cache_args field, the greater of the two alignments is applied.
+ */
 #define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
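
For illustration, a minimal sketch of the alignment rule above (struct foo and all names are hypothetical, not part of this commit): with 64-byte cache lines, a 24-byte object is smaller than half a line, so the effective alignment becomes 32 bytes rather than 64.

#include <linux/slab.h>

struct foo {
	unsigned long a, b, c;	/* 24 bytes on 64-bit */
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	/* 24 <= 64/2, so objects are aligned to 32 bytes, not 64 */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}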
@@ -87,8 +97,8 @@ enum _slab_flag_bits {
 #define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
 #define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
-/*
- * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
+/**
+ * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
  * This delays freeing the SLAB page by a grace period; it does _NOT_
  * delay object freeing. This means that if you do kmem_cache_free()
@@ -99,20 +109,22 @@ enum _slab_flag_bits {
  * stays valid, the trick to using this is relying on an independent
  * object validation pass. Something like:
  *
- * begin:
- *  rcu_read_lock();
- *  obj = lockless_lookup(key);
- *  if (obj) {
- *   if (!try_get_ref(obj)) // might fail for free objects
- *    rcu_read_unlock();
- *    goto begin;
+ * ::
+ *
+ *  begin:
+ *   rcu_read_lock();
+ *   obj = lockless_lookup(key);
+ *   if (obj) {
+ *    if (!try_get_ref(obj)) // might fail for free objects
+ *     rcu_read_unlock();
+ *     goto begin;
  *
- *  if (obj->key != key) { // not the object we expected
- *   put_ref(obj);
- *   rcu_read_unlock();
- *   goto begin;
- *  }
- * }
+ *   if (obj->key != key) { // not the object we expected
+ *    put_ref(obj);
+ *    rcu_read_unlock();
+ *    goto begin;
+ *   }
+ *  }
  * rcu_read_unlock();
  *
  * This is useful if we need to approach a kernel structure obliquely,
@@ -137,7 +149,6 @@ enum _slab_flag_bits {
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-/* Defer freeing slabs to RCU */
 #define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
 #define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
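
To make the pattern above concrete, here is a hedged sketch of the validation pass, assuming the object embeds a refcount_t and a key; lockless_lookup() and conn_put() are hypothetical stand-ins for the try_get_ref()/put_ref() placeholders, and refcount_inc_not_zero() is one real primitive that can fail for already-freed objects:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct conn {
	unsigned long key;
	refcount_t ref;
};

/* conn_cache is assumed created with SLAB_TYPESAFE_BY_RCU, so the memory of
 * a found object stays a struct conn for the whole RCU read-side section,
 * even if the object itself is freed and reused concurrently. */
static struct conn *conn_lookup(unsigned long key)
{
	struct conn *obj;

begin:
	rcu_read_lock();
	obj = lockless_lookup(key);	/* hypothetical lockless hash lookup */
	if (obj) {
		/* fails if the object was freed (refcount dropped to zero) */
		if (!refcount_inc_not_zero(&obj->ref)) {
			rcu_read_unlock();
			goto begin;
		}
		/* the memory may have been reused for a different identity */
		if (obj->key != key) {
			conn_put(obj);	/* hypothetical put_ref() */
			rcu_read_unlock();
			goto begin;
		}
	}
	rcu_read_unlock();
	return obj;
}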
@@ -170,7 +181,12 @@ enum _slab_flag_bits {
 #else
 # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
 #endif
-/* Account to memcg */
+/**
+ * define SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * All object allocations from this cache will be memcg accounted, regardless
+ * of whether __GFP_ACCOUNT is passed to individual allocations.
+ */
 #ifdef CONFIG_MEMCG
 # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
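
As a usage sketch (all names hypothetical), SLAB_ACCOUNT at cache creation makes every allocation behave as if __GFP_ACCOUNT were passed to it:

#include <linux/slab.h>

struct my_obj { unsigned long data[4]; };	/* hypothetical payload */

static struct kmem_cache *accounted_cache;
static struct my_obj *obj;

static int __init accounted_cache_init(void)
{
	accounted_cache = kmem_cache_create("my_accounted",
					    sizeof(struct my_obj), 0,
					    SLAB_ACCOUNT, NULL);
	if (!accounted_cache)
		return -ENOMEM;

	/* charged to the caller's memcg even without __GFP_ACCOUNT here */
	obj = kmem_cache_alloc(accounted_cache, GFP_KERNEL);
	return obj ? 0 : -ENOMEM;
}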
@@ -197,7 +213,13 @@ enum _slab_flag_bits {
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-/* Objects are reclaimable */
+/**
+ * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * Use this flag for caches that have an associated shrinker. As a result, slab
+ * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
+ * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
+ */
 #ifndef CONFIG_SLUB_TINY
 #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
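
Similarly for SLAB_RECLAIM_ACCOUNT, a sketch under the assumption that a filesystem-like subsystem registers a shrinker able to free these objects (all names hypothetical):

#include <linux/slab.h>

struct myfs_inode { unsigned long fields[8]; };	/* hypothetical */

static struct kmem_cache *myfs_inode_cache;

static int __init myfs_cache_init(void)
{
	/* slab pages come from __GFP_RECLAIMABLE blocks and are counted as
	 * SReclaimable; a shrinker (not shown) frees objects under memory
	 * pressure */
	myfs_inode_cache = kmem_cache_create("myfs_inode_cache",
					     sizeof(struct myfs_inode), 0,
					     SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
					     NULL);
	return myfs_inode_cache ? 0 : -ENOMEM;
}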

mm/slab_common.c

Lines changed: 13 additions & 1 deletion
@@ -254,11 +254,23 @@ static struct kmem_cache *create_cache(const char *name,
  * @object_size: The size of objects to be created in this cache.
  * @args: Additional arguments for the cache creation (see
  *        &struct kmem_cache_args).
- * @flags: See %SLAB_* flags for an explanation of individual @flags.
+ * @flags: See the descriptions of individual flags. The common ones are
+ *         listed in the description below.
  *
  * Not to be called directly, use the kmem_cache_create() wrapper with the same
  * parameters.
  *
+ * Commonly used @flags:
+ *
+ * &SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * &SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * &SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * &SLAB_TYPESAFE_BY_RCU - Slab page (not individual object) freeing is delayed
+ *   by a grace period - see the full description before using.
+ *
  * Context: Cannot be called within an interrupt, but can be interrupted.
  *
  * Return: a pointer to the cache on success, NULL on failure.
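
Tying the two files together, a hedged sketch of the kmem_cache_create() wrapper mentioned above, combining an explicit &struct kmem_cache_args alignment with the common flags (struct my_obj and all names are hypothetical):

#include <linux/slab.h>

struct my_obj { unsigned long data[4]; };	/* hypothetical payload */

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	struct kmem_cache_args args = {
		/* combined with SLAB_HWCACHE_ALIGN; the greater of the two
		 * alignments is applied */
		.align = 16,
	};

	my_cache = kmem_cache_create("my_objs", sizeof(struct my_obj), &args,
				     SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
	return my_cache ? 0 : -ENOMEM;
}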
