
Commit 9b94fe9

xairy authored and akpm00 committed
kasan: move kasan_mempool_poison_object
Move kasan_mempool_poison_object after all slab-related KASAN hooks.

This is a preparatory change for the following patches in this series.

No functional changes.

Link: https://lkml.kernel.org/r/23ea215409f43c13cdf9ecc454501a264c107d67.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Breno Leitao <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 280ec6c commit 9b94fe9
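For context on where this hook fires (not part of the diff below), here is a minimal sketch of the mempool free path, modeled on kasan_poison_element() in mm/mempool.c; the exact body varies across kernel versions, so treat it as illustrative rather than the kernel's verbatim code:

#include <linux/mempool.h>

/*
 * Illustrative sketch modeled on mm/mempool.c: only slab- and
 * kmalloc-backed pools route a freed element through
 * kasan_mempool_poison_object(); page-backed pools take a separate
 * KASAN path for whole pages.
 */
static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_mempool_poison_object(element);
}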

File tree: 2 files changed (+31 −31 lines)


include/linux/kasan.h

Lines changed: 8 additions & 8 deletions
@@ -172,13 +172,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
 	__kasan_kfree_large(ptr, _RET_IP_);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
-static __always_inline void kasan_mempool_poison_object(void *ptr)
-{
-	if (kasan_enabled())
-		__kasan_mempool_poison_object(ptr, _RET_IP_);
-}
-
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 					void *object, gfp_t flags, bool init);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -219,6 +212,13 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
+void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+static __always_inline void kasan_mempool_poison_object(void *ptr)
+{
+	if (kasan_enabled())
+		__kasan_mempool_poison_object(ptr, _RET_IP_);
+}
+
 /*
  * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
  * the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -256,7 +256,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
 	return false;
 }
 static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_mempool_poison_object(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				gfp_t flags, bool init)
 {
@@ -276,6 +275,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
+static inline void kasan_mempool_poison_object(void *ptr) {}
 static inline bool kasan_check_byte(const void *address)
 {
 	return true;

mm/kasan/common.c

Lines changed: 23 additions & 23 deletions
@@ -282,29 +282,6 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 	____kasan_kfree_large(ptr, ip);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
-{
-	struct folio *folio;
-
-	folio = virt_to_folio(ptr);
-
-	/*
-	 * Even though this function is only called for kmem_cache_alloc and
-	 * kmalloc backed mempool allocations, those allocations can still be
-	 * !PageSlab() when the size provided to kmalloc is larger than
-	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
-	 */
-	if (unlikely(!folio_test_slab(folio))) {
-		if (____kasan_kfree_large(ptr, ip))
-			return;
-		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
-	} else {
-		struct slab *slab = folio_slab(folio);
-
-		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
-	}
-}
-
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 					void *object, gfp_t flags, bool init)
 {
@@ -452,6 +429,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
+void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+	struct folio *folio;
+
+	folio = virt_to_folio(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!folio_test_slab(folio))) {
+		if (____kasan_kfree_large(ptr, ip))
+			return;
+		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+	} else {
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
+	}
+}
+
 bool __kasan_check_byte(const void *address, unsigned long ip)
 {
 	if (!kasan_byte_accessible(address)) {
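As the comment in the moved function notes, a kmalloc-backed mempool can still hand out page_alloc memory once the element size exceeds the largest kmalloc cache. A hypothetical setup that would exercise the !folio_test_slab() branch; mempool_init_kmalloc_pool() is the real helper from <linux/mempool.h>, while the pool name and element size here are illustrative:

#include <linux/init.h>
#include <linux/mempool.h>

/*
 * Hypothetical example: a 64 KiB element size exceeds
 * KMALLOC_MAX_CACHE_SIZE on typical SLUB configurations, so kmalloc
 * falls back to the page allocator and freeing an element into this
 * pool reaches the !folio_test_slab() branch above.
 */
static mempool_t big_pool;

static int __init big_pool_init(void)
{
	/* 4 preallocated elements of 64 KiB each. */
	return mempool_init_kmalloc_pool(&big_pool, 4, 64 << 10);
}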
