@@ -77,7 +77,17 @@ enum _slab_flag_bits {
#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
- /* Align objs on cache lines */
+ /**
+ * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * Sufficiently large objects are aligned on a cache line boundary. For object
+ * sizes smaller than half of the cache line size, the alignment is half of
+ * the cache line size. In general, if the object size is smaller than 1/2^n
+ * of the cache line size, the alignment is adjusted to 1/2^n.
+ *
+ * If explicit alignment is also requested by the respective
+ * &struct kmem_cache_args field, the greater of both alignments is applied.
+ */
#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
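For illustration, a minimal sketch of a cache that opts into hardware cache-line alignment; the structure, cache name and init function are hypothetical and not from this patch, only kmem_cache_create() and SLAB_HWCACHE_ALIGN are the kernel API.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical counter object; 16 bytes on 64-bit. */
struct demo_counter {
	unsigned long hits;
	unsigned long misses;
};

static struct kmem_cache *demo_counter_cachep;

static int __init demo_counter_cache_init(void)
{
	/*
	 * SLAB_HWCACHE_ALIGN requests cache line alignment, but since this
	 * object is smaller than half of a 64-byte cache line the effective
	 * alignment may be relaxed to 32 or 16 bytes, as described above.
	 * A non-zero align argument would take precedence if it were larger.
	 */
	demo_counter_cachep = kmem_cache_create("demo_counter",
						sizeof(struct demo_counter),
						0, SLAB_HWCACHE_ALIGN, NULL);
	return demo_counter_cachep ? 0 : -ENOMEM;
}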
@@ -87,8 +97,8 @@ enum _slab_flag_bits {
#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
- /*
- * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
+ /**
+ * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -99,20 +109,22 @@ enum _slab_flag_bits {
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
- * begin:
- *  rcu_read_lock();
- *  obj = lockless_lookup(key);
- *  if (obj) {
- *    if (!try_get_ref(obj)) // might fail for free objects
- *      rcu_read_unlock();
- *      goto begin;
+ * ::
+ *
+ *  begin:
+ *   rcu_read_lock();
+ *   obj = lockless_lookup(key);
+ *   if (obj) {
+ *     if (!try_get_ref(obj)) // might fail for free objects
+ *       rcu_read_unlock();
+ *       goto begin;
*
- *    if (obj->key != key) { // not the object we expected
- *      put_ref(obj);
- *      rcu_read_unlock();
- *      goto begin;
- *    }
- *  }
+ *     if (obj->key != key) { // not the object we expected
+ *       put_ref(obj);
+ *       rcu_read_unlock();
+ *       goto begin;
+ *     }
+ *   }
* rcu_read_unlock();
*
* This is useful if we need to approach a kernel structure obliquely,
@@ -137,7 +149,6 @@ enum _slab_flag_bits {
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
- /* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
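For illustration, the lookup pattern from the SLAB_TYPESAFE_BY_RCU comment written out as compilable C. The object type and the helpers lockless_lookup(), try_get_ref() and put_ref() are hypothetical stand-ins for whatever the cache user provides; only the retry structure comes from the comment above.

#include <linux/rcupdate.h>
#include <linux/types.h>

/* Hypothetical object and helpers, mirroring the names in the comment. */
struct demo_obj {
	unsigned long key;
	/* ... refcount, payload ... */
};

struct demo_obj *lockless_lookup(unsigned long key);	/* e.g. an RCU hash walk */
bool try_get_ref(struct demo_obj *obj);			/* fails once the refcount is zero */
void put_ref(struct demo_obj *obj);

struct demo_obj *demo_obj_get(unsigned long key)
{
	struct demo_obj *obj;

begin:
	rcu_read_lock();
	obj = lockless_lookup(key);
	if (obj) {
		if (!try_get_ref(obj)) {	/* might fail for free objects */
			rcu_read_unlock();
			goto begin;
		}
		if (obj->key != key) {		/* not the object we expected */
			put_ref(obj);
			rcu_read_unlock();
			goto begin;
		}
	}
	rcu_read_unlock();
	return obj;	/* NULL, or a validated object with a reference held */
}

The explicit braces are the one deviation from the pseudo-code: without them only the first statement after a failed try_get_ref() would be conditional, and the unconditional goto would retry forever.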
@@ -170,7 +181,12 @@ enum _slab_flag_bits {
#else
# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
- /* Account to memcg */
+ /**
+ * define SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * All object allocations from this cache will be memcg accounted, whether or
+ * not __GFP_ACCOUNT is passed to the individual allocation.
+ */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
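For illustration, a hypothetical cache (not from this patch) whose allocations are always memcg accounted; the individual allocation call does not need to pass __GFP_ACCOUNT.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical per-connection state object. */
struct demo_session {
	u64 id;
	void *private;
};

static struct kmem_cache *demo_session_cachep;

static int __init demo_session_cache_init(void)
{
	/* SLAB_ACCOUNT: every allocation from this cache is memcg accounted. */
	demo_session_cachep = kmem_cache_create("demo_session",
						sizeof(struct demo_session),
						0, SLAB_ACCOUNT, NULL);
	return demo_session_cachep ? 0 : -ENOMEM;
}

static struct demo_session *demo_session_alloc(void)
{
	/* No __GFP_ACCOUNT needed here; the cache flag already covers it. */
	return kmem_cache_zalloc(demo_session_cachep, GFP_KERNEL);
}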
@@ -197,7 +213,13 @@ enum _slab_flag_bits {
#endif

/* The following flags affect the page allocator grouping pages by mobility */
- /* Objects are reclaimable */
+ /**
+ * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * Use this flag for caches that have an associated shrinker. As a result, slab
+ * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
+ * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
+ */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
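For illustration, a hypothetical cache (not from this patch) for objects that a shrinker can free under memory pressure. The flag only affects page grouping and accounting; registering the shrinker itself is up to the cache user and is omitted here.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical cached lookup result that a shrinker may discard. */
struct demo_cache_entry {
	unsigned long key;
	unsigned long value;
};

static struct kmem_cache *demo_entry_cachep;

static int __init demo_entry_cache_init(void)
{
	/*
	 * SLAB_RECLAIM_ACCOUNT: slab pages are allocated __GFP_RECLAIMABLE
	 * and show up under SReclaimable in /proc/meminfo. A shrinker that
	 * frees demo_cache_entry objects would be registered separately.
	 */
	demo_entry_cachep = kmem_cache_create("demo_cache_entry",
					      sizeof(struct demo_cache_entry),
					      0, SLAB_RECLAIM_ACCOUNT, NULL);
	return demo_entry_cachep ? 0 : -ENOMEM;
}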