Skip to content

Commit dfdc8d2

Browse files
committed
Merge patch series "fs,mm: add kmem_cache_create_rcu()"
Christian Brauner <[email protected]> says: When a kmem cache is created with SLAB_TYPESAFE_BY_RCU the free pointer must be located outside of the object because we don't know what part of the memory can safely be overwritten as it may be needed to prevent object recycling. That has the consequence that SLAB_TYPESAFE_BY_RCU may end up adding a new cacheline. This is the case for e.g., struct file. After having it shrunk down by 40 bytes and having it fit in three cachelines we still have SLAB_TYPESAFE_BY_RCU adding a fourth cacheline because it needs to accommodate the free pointer. Add a new kmem_cache_create_rcu() function that allows the caller to specify an offset where the free pointer is supposed to be placed. Before this series cat /proc/slabinfo: filp 1198 1248 256 32 2 : tunables 0 0 0 : slabdata 39 39 0 ^^^ After this series cat /proc/slabinfo: filp 1323 1323 192 21 1 : tunables 0 0 0 : slabdata 63 63 0 ^^^ * patches from https://lore.kernel.org/r/[email protected]: fs: use kmem_cache_create_rcu() mm: add kmem_cache_create_rcu() mm: remove unused root_cache argument Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Christian Brauner <[email protected]>
2 parents c0390d5 + ea566e1 commit dfdc8d2

File tree

6 files changed

+127
-51
lines changed

6 files changed

+127
-51
lines changed

fs/file_table.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -511,9 +511,9 @@ EXPORT_SYMBOL(__fput_sync);
511511

512512
void __init files_init(void)
513513
{
514-
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
515-
SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
516-
SLAB_PANIC | SLAB_ACCOUNT, NULL);
514+
filp_cachep = kmem_cache_create_rcu("filp", sizeof(struct file),
515+
offsetof(struct file, f_freeptr),
516+
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
517517
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
518518
}
519519

include/linux/fs.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1011,6 +1011,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
10111011
* @f_task_work: task work entry point
10121012
* @f_llist: work queue entrypoint
10131013
* @f_ra: file's readahead state
1014+
* @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
10141015
*/
10151016
struct file {
10161017
atomic_long_t f_count;
@@ -1042,6 +1043,7 @@ struct file {
10421043
struct callback_head f_task_work;
10431044
struct llist_node f_llist;
10441045
struct file_ra_state f_ra;
1046+
freeptr_t f_freeptr;
10451047
};
10461048
/* --- cacheline 3 boundary (192 bytes) --- */
10471049
} __randomize_layout

include/linux/slab.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -212,6 +212,12 @@ enum _slab_flag_bits {
212212
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
213213
#endif
214214

215+
/*
216+
* freeptr_t represents a SLUB freelist pointer, which might be encoded
217+
* and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
218+
*/
219+
typedef struct { unsigned long v; } freeptr_t;
220+
215221
/*
216222
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
217223
*
@@ -242,6 +248,9 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
242248
slab_flags_t flags,
243249
unsigned int useroffset, unsigned int usersize,
244250
void (*ctor)(void *));
251+
struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
252+
unsigned int freeptr_offset,
253+
slab_flags_t flags);
245254
void kmem_cache_destroy(struct kmem_cache *s);
246255
int kmem_cache_shrink(struct kmem_cache *s);
247256

mm/slab.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,8 @@ struct kmem_cache {
261261
unsigned int object_size; /* Object size without metadata */
262262
struct reciprocal_value reciprocal_size;
263263
unsigned int offset; /* Free pointer offset */
264+
/* Specific free pointer requested (if not UINT_MAX) */
265+
unsigned int rcu_freeptr_offset;
264266
#ifdef CONFIG_SLUB_CPU_PARTIAL
265267
/* Number of per cpu partial objects to keep around */
266268
unsigned int cpu_partial;

mm/slab_common.c

Lines changed: 98 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -202,31 +202,38 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
202202
}
203203

204204
static struct kmem_cache *create_cache(const char *name,
205-
unsigned int object_size, unsigned int align,
206-
slab_flags_t flags, unsigned int useroffset,
207-
unsigned int usersize, void (*ctor)(void *),
208-
struct kmem_cache *root_cache)
205+
unsigned int object_size, unsigned int freeptr_offset,
206+
unsigned int align, slab_flags_t flags,
207+
unsigned int useroffset, unsigned int usersize,
208+
void (*ctor)(void *))
209209
{
210210
struct kmem_cache *s;
211211
int err;
212212

213213
if (WARN_ON(useroffset + usersize > object_size))
214214
useroffset = usersize = 0;
215215

216+
/* If a custom freelist pointer is requested make sure it's sane. */
217+
err = -EINVAL;
218+
if (freeptr_offset != UINT_MAX &&
219+
(freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
220+
!IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
221+
goto out;
222+
216223
err = -ENOMEM;
217224
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
218225
if (!s)
219226
goto out;
220227

221228
s->name = name;
222229
s->size = s->object_size = object_size;
230+
s->rcu_freeptr_offset = freeptr_offset;
223231
s->align = align;
224232
s->ctor = ctor;
225233
#ifdef CONFIG_HARDENED_USERCOPY
226234
s->useroffset = useroffset;
227235
s->usersize = usersize;
228236
#endif
229-
230237
err = __kmem_cache_create(s, flags);
231238
if (err)
232239
goto out_free_cache;
@@ -241,38 +248,10 @@ static struct kmem_cache *create_cache(const char *name,
241248
return ERR_PTR(err);
242249
}
243250

244-
/**
245-
* kmem_cache_create_usercopy - Create a cache with a region suitable
246-
* for copying to userspace
247-
* @name: A string which is used in /proc/slabinfo to identify this cache.
248-
* @size: The size of objects to be created in this cache.
249-
* @align: The required alignment for the objects.
250-
* @flags: SLAB flags
251-
* @useroffset: Usercopy region offset
252-
* @usersize: Usercopy region size
253-
* @ctor: A constructor for the objects.
254-
*
255-
* Cannot be called within an interrupt, but can be interrupted.
256-
* The @ctor is run when new pages are allocated by the cache.
257-
*
258-
* The flags are
259-
*
260-
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
261-
* to catch references to uninitialised memory.
262-
*
263-
* %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
264-
* for buffer overruns.
265-
*
266-
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
267-
* cacheline. This can be beneficial if you're counting cycles as closely
268-
* as davem.
269-
*
270-
* Return: a pointer to the cache on success, NULL on failure.
271-
*/
272-
struct kmem_cache *
273-
kmem_cache_create_usercopy(const char *name,
274-
unsigned int size, unsigned int align,
275-
slab_flags_t flags,
251+
static struct kmem_cache *
252+
do_kmem_cache_create_usercopy(const char *name,
253+
unsigned int size, unsigned int freeptr_offset,
254+
unsigned int align, slab_flags_t flags,
276255
unsigned int useroffset, unsigned int usersize,
277256
void (*ctor)(void *))
278257
{
@@ -332,9 +311,9 @@ kmem_cache_create_usercopy(const char *name,
332311
goto out_unlock;
333312
}
334313

335-
s = create_cache(cache_name, size,
314+
s = create_cache(cache_name, size, freeptr_offset,
336315
calculate_alignment(flags, align, size),
337-
flags, useroffset, usersize, ctor, NULL);
316+
flags, useroffset, usersize, ctor);
338317
if (IS_ERR(s)) {
339318
err = PTR_ERR(s);
340319
kfree_const(cache_name);
@@ -356,6 +335,45 @@ kmem_cache_create_usercopy(const char *name,
356335
}
357336
return s;
358337
}
338+
339+
/**
340+
* kmem_cache_create_usercopy - Create a cache with a region suitable
341+
* for copying to userspace
342+
* @name: A string which is used in /proc/slabinfo to identify this cache.
343+
* @size: The size of objects to be created in this cache.
344+
* @freeptr_offset: Custom offset for the free pointer in RCU caches
345+
* @align: The required alignment for the objects.
346+
* @flags: SLAB flags
347+
* @useroffset: Usercopy region offset
348+
* @usersize: Usercopy region size
349+
* @ctor: A constructor for the objects.
350+
*
351+
* Cannot be called within a interrupt, but can be interrupted.
352+
* The @ctor is run when new pages are allocated by the cache.
353+
*
354+
* The flags are
355+
*
356+
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
357+
* to catch references to uninitialised memory.
358+
*
359+
* %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
360+
* for buffer overruns.
361+
*
362+
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
363+
* cacheline. This can be beneficial if you're counting cycles as closely
364+
* as davem.
365+
*
366+
* Return: a pointer to the cache on success, NULL on failure.
367+
*/
368+
struct kmem_cache *
369+
kmem_cache_create_usercopy(const char *name, unsigned int size,
370+
unsigned int align, slab_flags_t flags,
371+
unsigned int useroffset, unsigned int usersize,
372+
void (*ctor)(void *))
373+
{
374+
return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
375+
useroffset, usersize, ctor);
376+
}
359377
EXPORT_SYMBOL(kmem_cache_create_usercopy);
360378

361379
/**
@@ -387,11 +405,50 @@ struct kmem_cache *
387405
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
388406
slab_flags_t flags, void (*ctor)(void *))
389407
{
390-
return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
391-
ctor);
408+
return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
409+
0, 0, ctor);
392410
}
393411
EXPORT_SYMBOL(kmem_cache_create);
394412

413+
/**
414+
* kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
415+
* @name: A string which is used in /proc/slabinfo to identify this cache.
416+
* @size: The size of objects to be created in this cache.
417+
* @freeptr_offset: The offset into the memory to the free pointer
418+
* @flags: SLAB flags
419+
*
420+
* Cannot be called within an interrupt, but can be interrupted.
421+
*
422+
* See kmem_cache_create() for an explanation of possible @flags.
423+
*
424+
* By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
425+
* of the object. This might cause the object to grow in size. Callers
426+
* that have a reason to avoid this can specify a custom free pointer
427+
* offset in their struct where the free pointer will be placed.
428+
*
429+
* Note that placing the free pointer inside the object requires the
430+
* caller to ensure that no fields are invalidated that are required to
431+
* guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
432+
details.)
433+
*
434+
* Using zero as a value for @freeptr_offset is valid. To request no
435+
* offset UINT_MAX must be specified.
436+
*
437+
* Note that @ctor isn't supported with custom free pointers as a @ctor
438+
* requires an external free pointer.
439+
*
440+
* Return: a pointer to the cache on success, NULL on failure.
441+
*/
442+
struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
443+
unsigned int freeptr_offset,
444+
slab_flags_t flags)
445+
{
446+
return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
447+
flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
448+
NULL);
449+
}
450+
EXPORT_SYMBOL(kmem_cache_create_rcu);
451+
395452
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
396453

397454
/**

mm/slub.c

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -465,12 +465,6 @@ static struct workqueue_struct *flushwq;
465465
* Core slab cache functions
466466
*******************************************************************/
467467

468-
/*
469-
* freeptr_t represents a SLUB freelist pointer, which might be encoded
470-
* and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
471-
*/
472-
typedef struct { unsigned long v; } freeptr_t;
473-
474468
/*
475469
* Returns freelist pointer (ptr). With hardening, this is obfuscated
476470
* with an XOR of the address where the pointer is held and a per-cache
@@ -3921,6 +3915,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
39213915
/*
39223916
* If the object has been wiped upon free, make sure it's fully initialized by
39233917
* zeroing out freelist pointer.
3918+
*
3919+
* Note that we also wipe custom freelist pointers specified via
3920+
* s->rcu_freeptr_offset.
39243921
*/
39253922
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
39263923
void *obj)
@@ -5144,6 +5141,12 @@ static void set_cpu_partial(struct kmem_cache *s)
51445141
#endif
51455142
}
51465143

5144+
/* Was a valid freeptr offset requested? */
5145+
static inline bool has_freeptr_offset(const struct kmem_cache *s)
5146+
{
5147+
return s->rcu_freeptr_offset != UINT_MAX;
5148+
}
5149+
51475150
/*
51485151
* calculate_sizes() determines the order and the distribution of data within
51495152
* a slab object.
@@ -5189,7 +5192,8 @@ static int calculate_sizes(struct kmem_cache *s)
51895192
*/
51905193
s->inuse = size;
51915194

5192-
if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
5195+
if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
5196+
(flags & SLAB_POISON) || s->ctor ||
51935197
((flags & SLAB_RED_ZONE) &&
51945198
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
51955199
/*
@@ -5210,6 +5214,8 @@ static int calculate_sizes(struct kmem_cache *s)
52105214
*/
52115215
s->offset = size;
52125216
size += sizeof(void *);
5217+
} else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
5218+
s->offset = s->rcu_freeptr_offset;
52135219
} else {
52145220
/*
52155221
* Store freelist pointer near middle of object to keep

0 commit comments

Comments
 (0)