
#include <linux/types.h>
#include <linux/gfp.h>
+ #include <pthread.h>

- #define SLAB_PANIC 2
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */

#define kzalloc_node(size, flags, node) kmalloc(size, flags)
+ enum _slab_flag_bits {
+ 	_SLAB_KMALLOC,
+ 	_SLAB_HWCACHE_ALIGN,
+ 	_SLAB_PANIC,
+ 	_SLAB_TYPESAFE_BY_RCU,
+ 	_SLAB_ACCOUNT,
+ 	_SLAB_FLAGS_LAST_BIT
+ };
+ 
+ #define __SLAB_FLAG_BIT(nr)	((unsigned int __force)(1U << (nr)))
+ #define __SLAB_FLAG_UNUSED	((unsigned int __force)(0U))
+ 
+ #define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
+ #define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
+ #define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
+ #ifdef CONFIG_MEMCG
+ # define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
+ #else
+ # define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
+ #endif

void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
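For reference, a small sketch (not part of this diff) of how the rebuilt flag macros compose; example_cache_flags() is a hypothetical helper, not something the patch adds:

/*
 * Illustration only: with the enum above, SLAB_HWCACHE_ALIGN is bit 1 (0x2)
 * and SLAB_PANIC is bit 2 (0x4); SLAB_ACCOUNT is bit 4 or 0 depending on
 * CONFIG_MEMCG, so OR'ing it in is always safe.
 */
static inline unsigned int example_cache_flags(void)
{
	return SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT;
}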
@@ -23,6 +43,86 @@ enum slab_state {
	FULL
};

+ struct kmem_cache {
+ 	pthread_mutex_t lock;
+ 	unsigned int size;
+ 	unsigned int align;
+ 	unsigned int sheaf_capacity;
+ 	int nr_objs;
+ 	void *objs;
+ 	void (*ctor)(void *);
+ 	bool non_kernel_enabled;
+ 	unsigned int non_kernel;
+ 	unsigned long nr_allocated;
+ 	unsigned long nr_tallocated;
+ 	bool exec_callback;
+ 	void (*callback)(void *);
+ 	void *private;
+ };
+ 
+ struct kmem_cache_args {
+ 	/**
+ 	 * @align: The required alignment for the objects.
+ 	 *
+ 	 * %0 means no specific alignment is requested.
+ 	 */
+ 	unsigned int align;
+ 	/**
+ 	 * @sheaf_capacity: The maximum size of the sheaf.
+ 	 */
+ 	unsigned int sheaf_capacity;
+ 	/**
+ 	 * @useroffset: Usercopy region offset.
+ 	 *
+ 	 * %0 is a valid offset when @usersize is non-%0.
+ 	 */
+ 	unsigned int useroffset;
+ 	/**
+ 	 * @usersize: Usercopy region size.
+ 	 *
+ 	 * %0 means no usercopy region is specified.
+ 	 */
+ 	unsigned int usersize;
+ 	/**
+ 	 * @freeptr_offset: Custom offset for the free pointer
+ 	 * in &SLAB_TYPESAFE_BY_RCU caches.
+ 	 *
+ 	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ 	 * outside of the object. This might cause the object to grow in size.
+ 	 * Cache creators that have a reason to avoid this can specify a custom
+ 	 * free pointer offset in their struct where the free pointer will be
+ 	 * placed.
+ 	 *
+ 	 * Note that placing the free pointer inside the object requires the
+ 	 * caller to ensure that no fields are invalidated that are required to
+ 	 * guard against object recycling (see &SLAB_TYPESAFE_BY_RCU for
+ 	 * details).
+ 	 *
+ 	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ 	 * is specified, %use_freeptr_offset must be set %true.
+ 	 *
+ 	 * Note that @ctor currently isn't supported with custom free pointers,
+ 	 * as a @ctor requires an external free pointer.
+ 	 */
+ 	unsigned int freeptr_offset;
+ 	/**
+ 	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ 	 */
+ 	bool use_freeptr_offset;
+ 	/**
+ 	 * @ctor: A constructor for the objects.
+ 	 *
+ 	 * The constructor is invoked for each object in a newly allocated slab
+ 	 * page. It is the cache user's responsibility to free the object in
+ 	 * the same state as after calling the constructor, or to deal
+ 	 * appropriately with any differences between a freshly constructed
+ 	 * and a reallocated object.
+ 	 *
+ 	 * %NULL means no constructor.
+ 	 */
+ 	void (*ctor)(void *);
+ };
+ 
static inline void *kzalloc(size_t size, gfp_t gfp)
{
	return kmalloc(size, gfp | __GFP_ZERO);
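As a usage sketch (not part of this diff) of the kmem_cache_args kernel-doc above: a custom @freeptr_offset is paired with @use_freeptr_offset set to true, and @ctor stays NULL since constructors aren't supported together with a custom free pointer. struct example_node and its fields are hypothetical; <stddef.h> and <stdbool.h> are assumed for offsetof() and bool.

#include <stddef.h>		/* offsetof() */
#include <stdbool.h>

/* Hypothetical object: the free pointer reuses storage that is not needed
 * to detect recycling under SLAB_TYPESAFE_BY_RCU. */
struct example_node {
	unsigned long key;
	void *freeptr;
};

static struct kmem_cache_args example_args = {
	.align			= __alignof__(struct example_node),
	.freeptr_offset		= offsetof(struct example_node, freeptr),
	.use_freeptr_offset	= true,
	/* .ctor left NULL: not supported together with freeptr_offset */
};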
@@ -37,9 +137,38 @@ static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp);

- struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- 			unsigned int align, unsigned int flags,
- 			void (*ctor)(void *));
+ 
+ struct kmem_cache *
+ __kmem_cache_create_args(const char *name, unsigned int size,
+ 			 struct kmem_cache_args *args, unsigned int flags);
+ 
+ /* If NULL is passed for @args, use this variant with default arguments. */
+ static inline struct kmem_cache *
+ __kmem_cache_default_args(const char *name, unsigned int size,
+ 			  struct kmem_cache_args *args, unsigned int flags)
+ {
+ 	struct kmem_cache_args kmem_default_args = {};
+ 
+ 	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+ }
+ 
+ static inline struct kmem_cache *
+ __kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ 		    unsigned int flags, void (*ctor)(void *))
+ {
+ 	struct kmem_cache_args kmem_args = {
+ 		.align = align,
+ 		.ctor = ctor,
+ 	};
+ 
+ 	return __kmem_cache_create_args(name, size, &kmem_args, flags);
+ }
+ 
+ #define kmem_cache_create(__name, __object_size, __args, ...)		\
+ 	_Generic((__args),						\
+ 		struct kmem_cache_args *: __kmem_cache_create_args,	\
+ 		void *: __kmem_cache_default_args,			\
+ 		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
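A hedged sketch (not part of this diff) of how the _Generic wrapper dispatches the three call forms; the cache names and sizes are made up, and NULL is assumed to be the usual ((void *)0) so it selects the void * association.

static struct kmem_cache_args example_node_args = {
	.align = 64,
};

static inline void example_create_caches(void)
{
	/* struct kmem_cache_args *  ->  __kmem_cache_create_args() */
	struct kmem_cache *a = kmem_cache_create("example_nodes", 128,
						  &example_node_args,
						  SLAB_HWCACHE_ALIGN);

	/* NULL (void *)  ->  __kmem_cache_default_args(), all-default args */
	struct kmem_cache *b = kmem_cache_create("example_plain", 64,
						  NULL, SLAB_PANIC);

	/* legacy (name, size, align, flags, ctor)  ->  __kmem_cache_create() */
	struct kmem_cache *c = kmem_cache_create("example_legacy", 64, 8,
						  SLAB_PANIC, NULL);

	(void)a; (void)b; (void)c;
}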

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,