@@ -202,31 +202,38 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
202
202
}
203
203
204
204
static struct kmem_cache * create_cache (const char * name ,
205
- unsigned int object_size , unsigned int align ,
206
- slab_flags_t flags , unsigned int useroffset ,
207
- unsigned int usersize , void ( * ctor )( void * ) ,
208
- struct kmem_cache * root_cache )
205
+ unsigned int object_size , unsigned int freeptr_offset ,
206
+ unsigned int align , slab_flags_t flags ,
207
+ unsigned int useroffset , unsigned int usersize ,
208
+ void ( * ctor )( void * ) )
209
209
{
210
210
struct kmem_cache * s ;
211
211
int err ;
212
212
213
213
if (WARN_ON (useroffset + usersize > object_size ))
214
214
useroffset = usersize = 0 ;
215
215
216
+ /* If a custom freelist pointer is requested make sure it's sane. */
217
+ err = - EINVAL ;
218
+ if (freeptr_offset != UINT_MAX &&
219
+ (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU ) ||
220
+ !IS_ALIGNED (freeptr_offset , sizeof (freeptr_t ))))
221
+ goto out ;
222
+
216
223
err = - ENOMEM ;
217
224
s = kmem_cache_zalloc (kmem_cache , GFP_KERNEL );
218
225
if (!s )
219
226
goto out ;
220
227
221
228
s -> name = name ;
222
229
s -> size = s -> object_size = object_size ;
230
+ s -> rcu_freeptr_offset = freeptr_offset ;
223
231
s -> align = align ;
224
232
s -> ctor = ctor ;
225
233
#ifdef CONFIG_HARDENED_USERCOPY
226
234
s -> useroffset = useroffset ;
227
235
s -> usersize = usersize ;
228
236
#endif
229
-
230
237
err = __kmem_cache_create (s , flags );
231
238
if (err )
232
239
goto out_free_cache ;
@@ -241,38 +248,10 @@ static struct kmem_cache *create_cache(const char *name,
241
248
return ERR_PTR (err );
242
249
}
243
250
244
- /**
245
- * kmem_cache_create_usercopy - Create a cache with a region suitable
246
- * for copying to userspace
247
- * @name: A string which is used in /proc/slabinfo to identify this cache.
248
- * @size: The size of objects to be created in this cache.
249
- * @align: The required alignment for the objects.
250
- * @flags: SLAB flags
251
- * @useroffset: Usercopy region offset
252
- * @usersize: Usercopy region size
253
- * @ctor: A constructor for the objects.
254
- *
255
- * Cannot be called within a interrupt, but can be interrupted.
256
- * The @ctor is run when new pages are allocated by the cache.
257
- *
258
- * The flags are
259
- *
260
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
261
- * to catch references to uninitialised memory.
262
- *
263
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
264
- * for buffer overruns.
265
- *
266
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
267
- * cacheline. This can be beneficial if you're counting cycles as closely
268
- * as davem.
269
- *
270
- * Return: a pointer to the cache on success, NULL on failure.
271
- */
272
- struct kmem_cache *
273
- kmem_cache_create_usercopy (const char * name ,
274
- unsigned int size , unsigned int align ,
275
- slab_flags_t flags ,
251
+ static struct kmem_cache *
252
+ do_kmem_cache_create_usercopy (const char * name ,
253
+ unsigned int size , unsigned int freeptr_offset ,
254
+ unsigned int align , slab_flags_t flags ,
276
255
unsigned int useroffset , unsigned int usersize ,
277
256
void (* ctor )(void * ))
278
257
{
@@ -332,9 +311,9 @@ kmem_cache_create_usercopy(const char *name,
332
311
goto out_unlock ;
333
312
}
334
313
335
- s = create_cache (cache_name , size ,
314
+ s = create_cache (cache_name , size , freeptr_offset ,
336
315
calculate_alignment (flags , align , size ),
337
- flags , useroffset , usersize , ctor , NULL );
316
+ flags , useroffset , usersize , ctor );
338
317
if (IS_ERR (s )) {
339
318
err = PTR_ERR (s );
340
319
kfree_const (cache_name );
@@ -356,6 +335,45 @@ kmem_cache_create_usercopy(const char *name,
356
335
}
357
336
return s ;
358
337
}
338
+
339
+ /**
340
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
341
+ * for copying to userspace
342
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
343
+ * @size: The size of objects to be created in this cache.
344
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
345
+ * @align: The required alignment for the objects.
346
+ * @flags: SLAB flags
347
+ * @useroffset: Usercopy region offset
348
+ * @usersize: Usercopy region size
349
+ * @ctor: A constructor for the objects.
350
+ *
351
+ * Cannot be called within a interrupt, but can be interrupted.
352
+ * The @ctor is run when new pages are allocated by the cache.
353
+ *
354
+ * The flags are
355
+ *
356
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
357
+ * to catch references to uninitialised memory.
358
+ *
359
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
360
+ * for buffer overruns.
361
+ *
362
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
363
+ * cacheline. This can be beneficial if you're counting cycles as closely
364
+ * as davem.
365
+ *
366
+ * Return: a pointer to the cache on success, NULL on failure.
367
+ */
368
/* Public wrapper: keeps the historical kmem_cache_create_usercopy() ABI while
 * the shared implementation now lives in do_kmem_cache_create_usercopy(). */
+ struct kmem_cache *
369
+ kmem_cache_create_usercopy (const char * name , unsigned int size ,
370
+ unsigned int align , slab_flags_t flags ,
371
+ unsigned int useroffset , unsigned int usersize ,
372
+ void (* ctor )(void * ))
373
+ {
374
/* UINT_MAX as freeptr_offset means "no custom free pointer offset"
 * (per the kmem_cache_create_rcu kerneldoc below); callers of this
 * API get the default out-of-object free pointer placement. */
+ return do_kmem_cache_create_usercopy (name , size , UINT_MAX , align , flags ,
375
+ useroffset , usersize , ctor );
376
+ }
359
377
EXPORT_SYMBOL (kmem_cache_create_usercopy );
360
378
361
379
/**
@@ -387,11 +405,50 @@ struct kmem_cache *
387
405
kmem_cache_create (const char * name , unsigned int size , unsigned int align ,
388
406
slab_flags_t flags , void (* ctor )(void * ))
389
407
{
390
/* Diff: reroute from the removed public kmem_cache_create_usercopy()
 * body to the new internal helper. Behavior is unchanged: UINT_MAX
 * requests no custom free pointer offset, and useroffset/usersize of
 * 0/0 means no usercopy region. */
- return kmem_cache_create_usercopy (name , size , align , flags , 0 , 0 ,
391
- ctor );
408
+ return do_kmem_cache_create_usercopy (name , size , UINT_MAX , align , flags ,
409
+ 0 , 0 , ctor );
392
410
}
393
411
EXPORT_SYMBOL (kmem_cache_create );
394
412
413
+ /**
414
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
415
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
416
+ * @size: The size of objects to be created in this cache.
417
+ * @freeptr_offset: The offset into the memory to the free pointer
418
+ * @flags: SLAB flags
419
+ *
420
+ * Cannot be called within an interrupt, but can be interrupted.
421
+ *
422
+ * See kmem_cache_create() for an explanation of possible @flags.
423
+ *
424
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
425
+ * of the object. This might cause the object to grow in size. Callers
426
+ * that have a reason to avoid this can specify a custom free pointer
427
+ * offset in their struct where the free pointer will be placed.
428
+ *
429
+ * Note that placing the free pointer inside the object requires the
430
+ * caller to ensure that no fields are invalidated that are required to
431
+ * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
432
+ * details.).
433
+ *
434
+ * Using zero as a value for @freeptr_offset is valid. To request no
435
+ * offset UINT_MAX must be specified.
436
+ *
437
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
438
+ * requires an external free pointer.
439
+ *
440
+ * Return: a pointer to the cache on success, NULL on failure.
441
+ */
442
+ struct kmem_cache * kmem_cache_create_rcu (const char * name , unsigned int size ,
443
+ unsigned int freeptr_offset ,
444
+ slab_flags_t flags )
445
+ {
446
/* Force SLAB_TYPESAFE_BY_RCU (the sanity check in create_cache()
 * rejects a custom freeptr_offset without it), use default alignment
 * (0), no usercopy region (0, 0), and no ctor (NULL) — the kerneldoc
 * above notes a ctor is unsupported with an in-object free pointer. */
+ return do_kmem_cache_create_usercopy (name , size , freeptr_offset , 0 ,
447
+ flags | SLAB_TYPESAFE_BY_RCU , 0 , 0 ,
448
+ NULL );
449
+ }
450
+ EXPORT_SYMBOL (kmem_cache_create_rcu );
451
+
395
452
static struct kmem_cache * kmem_buckets_cache __ro_after_init ;
396
453
397
454
/**
0 commit comments