@@ -202,30 +202,38 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
202
202
}
203
203
204
204
static struct kmem_cache * create_cache (const char * name ,
205
- unsigned int object_size , unsigned int align ,
206
- slab_flags_t flags , unsigned int useroffset ,
207
- unsigned int usersize , void (* ctor )(void * ))
205
+ unsigned int object_size , unsigned int freeptr_offset ,
206
+ unsigned int align , slab_flags_t flags ,
207
+ unsigned int useroffset , unsigned int usersize ,
208
+ void (* ctor )(void * ))
208
209
{
209
210
struct kmem_cache * s ;
210
211
int err ;
211
212
212
213
if (WARN_ON (useroffset + usersize > object_size ))
213
214
useroffset = usersize = 0 ;
214
215
216
+ /* If a custom freelist pointer is requested make sure it's sane. */
217
+ err = - EINVAL ;
218
+ if (freeptr_offset != UINT_MAX &&
219
+ (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU ) ||
220
+ !IS_ALIGNED (freeptr_offset , sizeof (freeptr_t ))))
221
+ goto out ;
222
+
215
223
err = - ENOMEM ;
216
224
s = kmem_cache_zalloc (kmem_cache , GFP_KERNEL );
217
225
if (!s )
218
226
goto out ;
219
227
220
228
s -> name = name ;
221
229
s -> size = s -> object_size = object_size ;
230
+ s -> rcu_freeptr_offset = freeptr_offset ;
222
231
s -> align = align ;
223
232
s -> ctor = ctor ;
224
233
#ifdef CONFIG_HARDENED_USERCOPY
225
234
s -> useroffset = useroffset ;
226
235
s -> usersize = usersize ;
227
236
#endif
228
-
229
237
err = __kmem_cache_create (s , flags );
230
238
if (err )
231
239
goto out_free_cache ;
@@ -240,38 +248,10 @@ static struct kmem_cache *create_cache(const char *name,
240
248
return ERR_PTR (err );
241
249
}
242
250
243
- /**
244
- * kmem_cache_create_usercopy - Create a cache with a region suitable
245
- * for copying to userspace
246
- * @name: A string which is used in /proc/slabinfo to identify this cache.
247
- * @size: The size of objects to be created in this cache.
248
- * @align: The required alignment for the objects.
249
- * @flags: SLAB flags
250
- * @useroffset: Usercopy region offset
251
- * @usersize: Usercopy region size
252
- * @ctor: A constructor for the objects.
253
- *
254
- * Cannot be called within a interrupt, but can be interrupted.
255
- * The @ctor is run when new pages are allocated by the cache.
256
- *
257
- * The flags are
258
- *
259
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
260
- * to catch references to uninitialised memory.
261
- *
262
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
263
- * for buffer overruns.
264
- *
265
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
266
- * cacheline. This can be beneficial if you're counting cycles as closely
267
- * as davem.
268
- *
269
- * Return: a pointer to the cache on success, NULL on failure.
270
- */
271
- struct kmem_cache *
272
- kmem_cache_create_usercopy (const char * name ,
273
- unsigned int size , unsigned int align ,
274
- slab_flags_t flags ,
251
+ static struct kmem_cache *
252
+ do_kmem_cache_create_usercopy (const char * name ,
253
+ unsigned int size , unsigned int freeptr_offset ,
254
+ unsigned int align , slab_flags_t flags ,
275
255
unsigned int useroffset , unsigned int usersize ,
276
256
void (* ctor )(void * ))
277
257
{
@@ -331,7 +311,7 @@ kmem_cache_create_usercopy(const char *name,
331
311
goto out_unlock ;
332
312
}
333
313
334
- s = create_cache (cache_name , size ,
314
+ s = create_cache (cache_name , size , freeptr_offset ,
335
315
calculate_alignment (flags , align , size ),
336
316
flags , useroffset , usersize , ctor );
337
317
if (IS_ERR (s )) {
@@ -355,6 +335,45 @@ kmem_cache_create_usercopy(const char *name,
355
335
}
356
336
return s ;
357
337
}
338
+
339
+ /**
340
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
341
+ * for copying to userspace
342
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
343
+ * @size: The size of objects to be created in this cache.
344
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
345
+ * @align: The required alignment for the objects.
346
+ * @flags: SLAB flags
347
+ * @useroffset: Usercopy region offset
348
+ * @usersize: Usercopy region size
349
+ * @ctor: A constructor for the objects.
350
+ *
351
+ * Cannot be called within a interrupt, but can be interrupted.
352
+ * The @ctor is run when new pages are allocated by the cache.
353
+ *
354
+ * The flags are
355
+ *
356
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
357
+ * to catch references to uninitialised memory.
358
+ *
359
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
360
+ * for buffer overruns.
361
+ *
362
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
363
+ * cacheline. This can be beneficial if you're counting cycles as closely
364
+ * as davem.
365
+ *
366
+ * Return: a pointer to the cache on success, NULL on failure.
367
+ */
368
+ struct kmem_cache *
369
+ kmem_cache_create_usercopy (const char * name , unsigned int size ,
370
+ unsigned int align , slab_flags_t flags ,
371
+ unsigned int useroffset , unsigned int usersize ,
372
+ void (* ctor )(void * ))
373
+ {
374
+ return do_kmem_cache_create_usercopy (name , size , UINT_MAX , align , flags ,
375
+ useroffset , usersize , ctor );
376
+ }
358
377
EXPORT_SYMBOL (kmem_cache_create_usercopy );
359
378
360
379
/**
@@ -386,11 +405,50 @@ struct kmem_cache *
386
405
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		  slab_flags_t flags, void (*ctor)(void *))
{
	/*
	 * freeptr_offset == UINT_MAX: no custom free pointer offset;
	 * useroffset/usersize == 0: no usercopy region.
	 */
	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
					     0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
393
412
413
/**
 * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @freeptr_offset: The offset into the memory to the free pointer
 * @flags: SLAB flags
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * See kmem_cache_create() for an explanation of possible @flags.
 *
 * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
 * of the object. This might cause the object to grow in size. Callers
 * that have a reason to avoid this can specify a custom free pointer
 * offset in their struct where the free pointer will be placed.
 *
 * Note that placing the free pointer inside the object requires the
 * caller to ensure that no fields are invalidated that are required to
 * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
 * details.).
 *
 * Using zero as a value for @freeptr_offset is valid. To request no
 * offset UINT_MAX must be specified.
 *
 * Note that @ctor isn't supported with custom free pointers as a @ctor
 * requires an external free pointer.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
					 unsigned int freeptr_offset,
					 slab_flags_t flags)
{
	/*
	 * align == 0 is resolved via calculate_alignment() downstream;
	 * useroffset/usersize == 0: no usercopy region; ctor is NULL
	 * because constructors are incompatible with a custom free
	 * pointer (see the kernel-doc note above).
	 */
	return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
					     flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
					     NULL);
}
EXPORT_SYMBOL(kmem_cache_create_rcu);
451
+
394
452
static struct kmem_cache * kmem_buckets_cache __ro_after_init ;
395
453
396
454
/**
0 commit comments