Commit c4fb7f0

howlett authored and tehcaster committed
tools/testing: Add support for changes to slab for sheaves
The slab changes for sheaves require more effort in the testing code. Unite all the kmem_cache work into the tools/include slab header for both the vma and maple tree testing. The vma test code also requires importing more #defines to allow for seamless use of the shared kmem_cache code.

This adds the pthread header to the slab header in the tools directory to allow for the pthread_mutex in linux.c.

Signed-off-by: Liam R. Howlett <[email protected]>
Reviewed-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent d09a61a commit c4fb7f0

4 files changed: +142 -114 lines changed


tools/include/linux/slab.h

Lines changed: 133 additions & 4 deletions
@@ -4,11 +4,31 @@
 
 #include <linux/types.h>
 #include <linux/gfp.h>
+#include <pthread.h>
 
-#define SLAB_PANIC 2
 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
 
 #define kzalloc_node(size, flags, node) kmalloc(size, flags)
+enum _slab_flag_bits {
+	_SLAB_KMALLOC,
+	_SLAB_HWCACHE_ALIGN,
+	_SLAB_PANIC,
+	_SLAB_TYPESAFE_BY_RCU,
+	_SLAB_ACCOUNT,
+	_SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr)	((unsigned int __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED	((unsigned int __force)(0U))
+
+#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
+#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
+#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
+#ifdef CONFIG_MEMCG
+# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
+#else
+# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
+#endif
 
 void *kmalloc(size_t size, gfp_t gfp);
 void kfree(void *p);
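The hard-coded "#define SLAB_PANIC 2" gives way to masks derived from positions in enum _slab_flag_bits, matching how the kernel itself derives slab flags. A standalone sanity-check sketch of what the new macros expand to (this mirrors the definitions above rather than including the real header; the kernel-only __force annotation is dropped):

	#include <assert.h>

	enum _slab_flag_bits {
		_SLAB_KMALLOC,		/* bit 0 */
		_SLAB_HWCACHE_ALIGN,	/* bit 1 */
		_SLAB_PANIC,		/* bit 2 */
		_SLAB_TYPESAFE_BY_RCU,	/* bit 3 */
		_SLAB_ACCOUNT,		/* bit 4 */
		_SLAB_FLAGS_LAST_BIT
	};
	#define __SLAB_FLAG_BIT(nr) ((unsigned int)(1U << (nr)))

	int main(void)
	{
		/* Each flag is a single bit at its enum position. */
		assert(__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN) == 0x2);
		assert(__SLAB_FLAG_BIT(_SLAB_PANIC) == 0x4);	/* previously the literal 2 */
		assert(__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU) == 0x8);
		return 0;
	}
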
@@ -23,6 +43,86 @@ enum slab_state {
 	FULL
 };
 
+struct kmem_cache {
+	pthread_mutex_t lock;
+	unsigned int size;
+	unsigned int align;
+	unsigned int sheaf_capacity;
+	int nr_objs;
+	void *objs;
+	void (*ctor)(void *);
+	bool non_kernel_enabled;
+	unsigned int non_kernel;
+	unsigned long nr_allocated;
+	unsigned long nr_tallocated;
+	bool exec_callback;
+	void (*callback)(void *);
+	void *private;
+};
+
+struct kmem_cache_args {
+	/**
+	 * @align: The required alignment for the objects.
+	 *
+	 * %0 means no specific alignment is requested.
+	 */
+	unsigned int align;
+	/**
+	 * @sheaf_capacity: The maximum size of the sheaf.
+	 */
+	unsigned int sheaf_capacity;
+	/**
+	 * @useroffset: Usercopy region offset.
+	 *
+	 * %0 is a valid offset, when @usersize is non-%0
+	 */
+	unsigned int useroffset;
+	/**
+	 * @usersize: Usercopy region size.
+	 *
+	 * %0 means no usercopy region is specified.
+	 */
+	unsigned int usersize;
+	/**
+	 * @freeptr_offset: Custom offset for the free pointer
+	 * in &SLAB_TYPESAFE_BY_RCU caches
+	 *
+	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+	 * outside of the object. This might cause the object to grow in size.
+	 * Cache creators that have a reason to avoid this can specify a custom
+	 * free pointer offset in their struct where the free pointer will be
+	 * placed.
+	 *
+	 * Note that placing the free pointer inside the object requires the
+	 * caller to ensure that no fields are invalidated that are required to
+	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+	 * details).
+	 *
+	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+	 * is specified, %use_freeptr_offset must be set %true.
+	 *
+	 * Note that @ctor currently isn't supported with custom free pointers
+	 * as a @ctor requires an external free pointer.
+	 */
+	unsigned int freeptr_offset;
+	/**
+	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
+	 */
+	bool use_freeptr_offset;
+	/**
+	 * @ctor: A constructor for the objects.
+	 *
+	 * The constructor is invoked for each object in a newly allocated slab
+	 * page. It is the cache user's responsibility to free object in the
+	 * same state as after calling the constructor, or deal appropriately
+	 * with any differences between a freshly constructed and a reallocated
+	 * object.
+	 *
+	 * %NULL means no constructor.
+	 */
+	void (*ctor)(void *);
+};
+
 static inline void *kzalloc(size_t size, gfp_t gfp)
 {
 	return kmalloc(size, gfp | __GFP_ZERO);
@@ -37,9 +137,38 @@ static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 }
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
-			unsigned int align, unsigned int flags,
-			void (*ctor)(void *));
+
+struct kmem_cache *
+__kmem_cache_create_args(const char *name, unsigned int size,
+			 struct kmem_cache_args *args, unsigned int flags);
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+			  struct kmem_cache_args *args, unsigned int flags)
+{
+	struct kmem_cache_args kmem_default_args = {};
+
+	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+		    unsigned int flags, void (*ctor)(void *))
+{
+	struct kmem_cache_args kmem_args = {
+		.align	= align,
+		.ctor	= ctor,
+	};
+
+	return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+#define kmem_cache_create(__name, __object_size, __args, ...)		\
+	_Generic((__args),						\
+		struct kmem_cache_args *: __kmem_cache_create_args,	\
+		void *: __kmem_cache_default_args,			\
+		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
 
 void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
 int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
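The _Generic selection at the bottom, which mirrors the kernel's own kmem_cache_create() definition, is what lets one macro serve three calling conventions: a struct kmem_cache_args pointer, a literal NULL, or the legacy align/ctor argument list. A self-contained sketch of the same dispatch pattern (all names below are illustrative, not from the commit; the NULL branch relies on NULL expanding to (void *)0, as it does in glibc):

	#include <stdio.h>
	#include <stddef.h>

	struct demo_args { unsigned int align; };

	/* Chosen when the third argument is a struct demo_args pointer. */
	static void create_args(const char *name, size_t size,
				struct demo_args *args, unsigned int flags)
	{
		printf("%s: args variant, align=%u\n", name, args->align);
	}

	/* Chosen when the third argument is NULL, i.e. (void *)0. */
	static void create_default(const char *name, size_t size, void *unused,
				   unsigned int flags)
	{
		printf("%s: default-args variant\n", name);
	}

	/* Chosen for anything else, e.g. a legacy integer alignment. */
	static void create_legacy(const char *name, size_t size,
				  unsigned int align, unsigned int flags)
	{
		printf("%s: legacy variant, align=%u\n", name, align);
	}

	#define demo_create(__name, __size, __args, ...)		\
		_Generic((__args),					\
			struct demo_args *: create_args,		\
			void *: create_default,				\
			default: create_legacy)(__name, __size, __args, __VA_ARGS__)

	int main(void)
	{
		struct demo_args args = { .align = 64 };

		demo_create("a", 128, &args, 0);  /* new args-based call */
		demo_create("b", 128, NULL, 0);   /* NULL selects the default variant */
		demo_create("c", 128, 64u, 0);    /* old-style integer align */
		return 0;
	}

Because the dispatch happens on the static type of the third argument, existing legacy call sites keep compiling unchanged while new callers can opt into the args structure.
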

tools/testing/shared/linux.c

Lines changed: 7 additions & 19 deletions
@@ -16,21 +16,6 @@ int nr_allocated;
 int preempt_count;
 int test_verbose;
 
-struct kmem_cache {
-	pthread_mutex_t lock;
-	unsigned int size;
-	unsigned int align;
-	int nr_objs;
-	void *objs;
-	void (*ctor)(void *);
-	unsigned int non_kernel;
-	unsigned long nr_allocated;
-	unsigned long nr_tallocated;
-	bool exec_callback;
-	void (*callback)(void *);
-	void *private;
-};
-
 void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
 {
 	cachep->callback = callback;
@@ -234,23 +219,26 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 }
 
 struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
-		unsigned int flags, void (*ctor)(void *))
+__kmem_cache_create_args(const char *name, unsigned int size,
+			 struct kmem_cache_args *args,
+			 unsigned int flags)
 {
 	struct kmem_cache *ret = malloc(sizeof(*ret));
 
 	pthread_mutex_init(&ret->lock, NULL);
 	ret->size = size;
-	ret->align = align;
+	ret->align = args->align;
+	ret->sheaf_capacity = args->sheaf_capacity;
 	ret->nr_objs = 0;
 	ret->nr_allocated = 0;
 	ret->nr_tallocated = 0;
 	ret->objs = NULL;
-	ret->ctor = ctor;
+	ret->ctor = args->ctor;
 	ret->non_kernel = 0;
 	ret->exec_callback = false;
 	ret->callback = NULL;
 	ret->private = NULL;
+
 	return ret;
 }
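__kmem_cache_create_args() initializes the cache's pthread_mutex_t, which is why slab.h now pulls in the pthread header; the allocation paths elsewhere in linux.c take that lock around the shared object list. A cut-down illustration of the pattern (an assumption-level sketch, not the actual linux.c code):

	#include <pthread.h>
	#include <stdlib.h>

	/* Illustrative cache: only the fields the locking touches. */
	struct demo_cache {
		pthread_mutex_t lock;
		unsigned int size;
		int nr_objs;	/* objects sitting on the reuse list */
		void *objs;	/* singly linked free list, next pointer at offset 0 */
	};

	/* Pop a recycled object under the lock, or fall back to malloc(). */
	static void *demo_cache_alloc(struct demo_cache *c)
	{
		void *p = NULL;

		pthread_mutex_lock(&c->lock);
		if (c->nr_objs) {
			p = c->objs;
			c->objs = *(void **)p;	/* unlink head of the free list */
			c->nr_objs--;
		}
		pthread_mutex_unlock(&c->lock);

		return p ? p : malloc(c->size);
	}
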

tools/testing/shared/maple-shim.c

Lines changed: 1 addition & 0 deletions
@@ -3,5 +3,6 @@
 /* Very simple shim around the maple tree. */
 
 #include "maple-shared.h"
+#include <linux/slab.h>
 
 #include "../../../lib/maple_tree.c"

tools/testing/vma/vma_internal.h

Lines changed: 1 addition & 91 deletions
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/refcount.h>
+#include <linux/slab.h>
 
 extern unsigned long stack_guard_gap;
 #ifdef CONFIG_MMU
@@ -509,65 +510,6 @@ struct pagetable_move_control {
 	.len_in = len_,					\
 }
 
-struct kmem_cache_args {
-	/**
-	 * @align: The required alignment for the objects.
-	 *
-	 * %0 means no specific alignment is requested.
-	 */
-	unsigned int align;
-	/**
-	 * @useroffset: Usercopy region offset.
-	 *
-	 * %0 is a valid offset, when @usersize is non-%0
-	 */
-	unsigned int useroffset;
-	/**
-	 * @usersize: Usercopy region size.
-	 *
-	 * %0 means no usercopy region is specified.
-	 */
-	unsigned int usersize;
-	/**
-	 * @freeptr_offset: Custom offset for the free pointer
-	 * in &SLAB_TYPESAFE_BY_RCU caches
-	 *
-	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
-	 * outside of the object. This might cause the object to grow in size.
-	 * Cache creators that have a reason to avoid this can specify a custom
-	 * free pointer offset in their struct where the free pointer will be
-	 * placed.
-	 *
-	 * Note that placing the free pointer inside the object requires the
-	 * caller to ensure that no fields are invalidated that are required to
-	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
-	 * details).
-	 *
-	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
-	 * is specified, %use_freeptr_offset must be set %true.
-	 *
-	 * Note that @ctor currently isn't supported with custom free pointers
-	 * as a @ctor requires an external free pointer.
-	 */
-	unsigned int freeptr_offset;
-	/**
-	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
-	 */
-	bool use_freeptr_offset;
-	/**
-	 * @ctor: A constructor for the objects.
-	 *
-	 * The constructor is invoked for each object in a newly allocated slab
-	 * page. It is the cache user's responsibility to free object in the
-	 * same state as after calling the constructor, or deal appropriately
-	 * with any differences between a freshly constructed and a reallocated
-	 * object.
-	 *
-	 * %NULL means no constructor.
-	 */
-	void (*ctor)(void *);
-};
-
 static inline void vma_iter_invalidate(struct vma_iterator *vmi)
 {
 	mas_pause(&vmi->mas);
@@ -652,38 +594,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_lock_seq = UINT_MAX;
 }
 
-struct kmem_cache {
-	const char *name;
-	size_t object_size;
-	struct kmem_cache_args *args;
-};
-
-static inline struct kmem_cache *__kmem_cache_create(const char *name,
-						     size_t object_size,
-						     struct kmem_cache_args *args)
-{
-	struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));
-
-	ret->name = name;
-	ret->object_size = object_size;
-	ret->args = args;
-
-	return ret;
-}
-
-#define kmem_cache_create(__name, __object_size, __args, ...) \
-	__kmem_cache_create((__name), (__object_size), (__args))
-
-static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
-{
-	return calloc(1, s->object_size);
-}
-
-static inline void kmem_cache_free(struct kmem_cache *s, void *x)
-{
-	free(x);
-}
-
 /*
  * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to these broadly available there, and temporarily
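With the local stubs gone, the vma tests now go through the shared header's args-based API, picking up the sheaf fields along the way. A hedged example of what a call site against the new header could look like (the object type, cache name, and sheaf_capacity value are illustrative, not taken from the commit):

	#include <linux/slab.h>	/* the shared tools/include header above */

	struct demo_obj { long a, b; };	/* stand-in for a real test object */

	static struct kmem_cache *demo_cachep;

	static void demo_cache_init(void)
	{
		struct kmem_cache_args args = {
			.align		= __alignof__(struct demo_obj),
			.sheaf_capacity	= 32,	/* illustrative capacity */
		};

		/* _Generic resolves this call to __kmem_cache_create_args(). */
		demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
						&args, SLAB_PANIC);
	}
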
