
Commit 40f3bf0

mm: Convert struct page to struct slab in functions used by other subsystems
KASAN, KFENCE and memcg interact with SLAB or SLUB internals through the functions nearest_obj(), obj_to_index() and objs_per_slab(), which take a struct page parameter. This patch converts them, and all their callers, to take struct slab instead, via the following coccinelle semantic patch:

// Options: --include-headers --no-includes --smpl-spacing
//          include/linux/slab_def.h include/linux/slub_def.h mm/slab.h
//          mm/kasan/*.c mm/kfence/kfence_test.c mm/memcontrol.c mm/slab.c
//          mm/slub.c
// Note: needs coccinelle 1.1.1 to avoid breaking whitespace

@@
@@

-objs_per_slab_page(
+objs_per_slab(
 ...
 )
 { ... }

@@
@@

-objs_per_slab_page(
+objs_per_slab(
 ...
 )

@@
identifier fn =~ "obj_to_index|objs_per_slab";
@@

fn(...,
-	const struct page *page
+	const struct slab *slab
 ,...)
 {
<...
(
-	page_address(page)
+	slab_address(slab)
|
-	page
+	slab
)
...>
 }

@@
identifier fn =~ "nearest_obj";
@@

fn(...,
-	struct page *page
+	const struct slab *slab
 ,...)
 {
<...
(
-	page_address(page)
+	slab_address(slab)
|
-	page
+	slab
)
...>
 }

@@
identifier fn =~ "nearest_obj|obj_to_index|objs_per_slab";
expression E;
@@

fn(...,
(
-	slab_page(E)
+	E
|
-	virt_to_page(E)
+	virt_to_slab(E)
|
-	virt_to_head_page(E)
+	virt_to_slab(E)
|
-	page
+	page_slab(page)
)
 ,...)

Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Andrey Konovalov <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Julia Lawall <[email protected]>
Cc: Luis Chamberlain <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Marco Elver <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
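For illustration only (not part of the commit): a minimal sketch of how an outside subsystem looks up an object's index after this conversion, mirroring the mm/kasan/common.c hunk below. The wrapper name example_obj_index() is hypothetical; virt_to_slab(), obj_to_index() and struct slab are the interfaces this patch converts callers to.

/* Hypothetical wrapper, shown only to illustrate the converted API. */
static inline unsigned int example_obj_index(struct kmem_cache *cache,
					     const void *object)
{
	/* virt_to_slab() replaces virt_to_head_page() as the lookup step. */
	struct slab *slab = virt_to_slab(object);

	/* obj_to_index() now takes a const struct slab *, not a struct page *. */
	return obj_to_index(cache, slab, (void *)object);
}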
1 parent dd35f71 commit 40f3bf0

11 files changed, 34 additions(+), 34 deletions(-)

include/linux/slab_def.h
Lines changed: 8 additions & 8 deletions

@@ -87,11 +87,11 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
 				 void *x)
 {
-	void *object = x - (x - page->s_mem) % cache->size;
-	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+	void *object = x - (x - slab->s_mem) % cache->size;
+	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
 
 	if (unlikely(object > last_object))
 		return last_object;
@@ -106,16 +106,16 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct page *page, void *obj)
+					const struct slab *slab, void *obj)
 {
-	u32 offset = (obj - page->s_mem);
+	u32 offset = (obj - slab->s_mem);
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
-				     const struct page *page)
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				const struct slab *slab)
 {
-	if (is_kfence_address(page_address(page)))
+	if (is_kfence_address(slab_address(slab)))
 		return 1;
 	return cache->num;
 }

include/linux/slub_def.h
Lines changed: 9 additions & 9 deletions

@@ -158,11 +158,11 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
 
 void *fixup_red_left(struct kmem_cache *s, void *p);
 
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
 				void *x) {
-	void *object = x - (x - page_address(page)) % cache->size;
-	void *last_object = page_address(page) +
-		(page->objects - 1) * cache->size;
+	void *object = x - (x - slab_address(slab)) % cache->size;
+	void *last_object = slab_address(slab) +
+		(slab->objects - 1) * cache->size;
 	void *result = (unlikely(object > last_object)) ? last_object : object;
 
 	result = fixup_red_left(cache, result);
@@ -178,16 +178,16 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct page *page, void *obj)
+					const struct slab *slab, void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
-	return __obj_to_index(cache, page_address(page), obj);
+	return __obj_to_index(cache, slab_address(slab), obj);
 }
 
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
-				     const struct page *page)
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				const struct slab *slab)
 {
-	return page->objects;
+	return slab->objects;
 }
 #endif /* _LINUX_SLUB_DEF_H */

mm/kasan/common.c
Lines changed: 2 additions & 2 deletions

@@ -298,7 +298,7 @@ static inline u8 assign_tag(struct kmem_cache *cache,
 	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
 #ifdef CONFIG_SLAB
 	/* For SLAB assign tags based on the object index in the freelist. */
-	return (u8)obj_to_index(cache, virt_to_head_page(object), (void *)object);
+	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
 #else
 	/*
 	 * For SLUB assign a random tag during slab creation, otherwise reuse
@@ -341,7 +341,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 	if (is_kfence_address(object))
 		return false;
 
-	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
+	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
 	    object)) {
 		kasan_report_invalid_free(tagged_object, ip);
 		return true;

mm/kasan/generic.c
Lines changed: 1 addition & 1 deletion

@@ -339,7 +339,7 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 		return;
 
 	cache = page->slab_cache;
-	object = nearest_obj(cache, page, addr);
+	object = nearest_obj(cache, page_slab(page), addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;

mm/kasan/report.c
Lines changed: 1 addition & 1 deletion

@@ -249,7 +249,7 @@ static void print_address_description(void *addr, u8 tag)
 
 	if (page && PageSlab(page)) {
 		struct kmem_cache *cache = page->slab_cache;
-		void *object = nearest_obj(cache, page, addr);
+		void *object = nearest_obj(cache, page_slab(page), addr);
 
 		describe_object(cache, object, addr, tag);
 	}

mm/kasan/report_tags.c
Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 	page = kasan_addr_to_page(addr);
 	if (page && PageSlab(page)) {
 		cache = page->slab_cache;
-		object = nearest_obj(cache, page, (void *)addr);
+		object = nearest_obj(cache, page_slab(page), (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);
 
 		if (alloc_meta) {

mm/kfence/kfence_test.c
Lines changed: 2 additions & 2 deletions

@@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 		 * even for KFENCE objects; these are required so that
 		 * memcg accounting works correctly.
 		 */
-		KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
-		KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);
+		KUNIT_EXPECT_EQ(test, obj_to_index(s, page_slab(page), alloc), 0U);
+		KUNIT_EXPECT_EQ(test, objs_per_slab(s, page_slab(page)), 1);
 
 		if (policy == ALLOCATE_ANY)
 			return alloc;

mm/memcontrol.c
Lines changed: 2 additions & 2 deletions

@@ -2819,7 +2819,7 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
-	unsigned int objects = objs_per_slab_page(s, page);
+	unsigned int objects = objs_per_slab(s, page_slab(page));
 	unsigned long memcg_data;
 	void *vec;
 
@@ -2881,7 +2881,7 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
 		struct obj_cgroup *objcg;
 		unsigned int off;
 
-		off = obj_to_index(page->slab_cache, page, p);
+		off = obj_to_index(page->slab_cache, page_slab(page), p);
 		objcg = page_objcgs(page)[off];
 		if (objcg)
 			return obj_cgroup_memcg(objcg);

mm/slab.c
Lines changed: 5 additions & 5 deletions

@@ -1559,7 +1559,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 		struct slab *slab = virt_to_slab(objp);
 		unsigned int objnr;
 
-		objnr = obj_to_index(cachep, slab_page(slab), objp);
+		objnr = obj_to_index(cachep, slab, objp);
 		if (objnr) {
 			objp = index_to_obj(cachep, slab, objnr - 1);
 			realobj = (char *)objp + obj_offset(cachep);
@@ -2529,7 +2529,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab)
 static void slab_put_obj(struct kmem_cache *cachep,
 			 struct slab *slab, void *objp)
 {
-	unsigned int objnr = obj_to_index(cachep, slab_page(slab), objp);
+	unsigned int objnr = obj_to_index(cachep, slab, objp);
 #if DEBUG
 	unsigned int i;
 
@@ -2716,7 +2716,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = (void *)caller;
 
-	objnr = obj_to_index(cachep, slab_page(slab), objp);
+	objnr = obj_to_index(cachep, slab, objp);
 
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slab, objnr));
@@ -3662,7 +3662,7 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 	objp = object - obj_offset(cachep);
 	kpp->kp_data_offset = obj_offset(cachep);
 	slab = virt_to_slab(objp);
-	objnr = obj_to_index(cachep, slab_page(slab), objp);
+	objnr = obj_to_index(cachep, slab, objp);
 	objp = index_to_obj(cachep, slab, objnr);
 	kpp->kp_objp = objp;
 	if (DEBUG && cachep->flags & SLAB_STORE_USER)
@@ -4180,7 +4180,7 @@ void __check_heap_object(const void *ptr, unsigned long n,
 
 	/* Find and validate object. */
 	cachep = slab->slab_cache;
-	objnr = obj_to_index(cachep, slab_page(slab), (void *)ptr);
+	objnr = obj_to_index(cachep, slab, (void *)ptr);
 	BUG_ON(objnr >= cachep->num);
 
 	/* Find offset within object. */

mm/slab.h
Lines changed: 2 additions & 2 deletions

@@ -483,7 +483,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 			continue;
 		}
 
-		off = obj_to_index(s, page, p[i]);
+		off = obj_to_index(s, page_slab(page), p[i]);
 		obj_cgroup_get(objcg);
 		page_objcgs(page)[off] = objcg;
 		mod_objcg_state(objcg, page_pgdat(page),
@@ -522,7 +522,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
 		else
 			s = s_orig;
 
-		off = obj_to_index(s, page, p[i]);
+		off = obj_to_index(s, page_slab(page), p[i]);
 		objcg = objcgs[off];
 		if (!objcg)
 			continue;
