
Commit f129c31

xairy authored and akpm00 committed
kasan: introduce kasan_mempool_poison_pages
Introduce and document a kasan_mempool_poison_pages hook to be used by the mempool code instead of kasan_poison_pages.

Compared to kasan_poison_pages, the new hook:

1. For the tag-based modes, skips checking and poisoning allocations that were not tagged due to sampling.

2. Checks for double-free and invalid-free bugs.

In the future, kasan_poison_pages can also be updated to handle #2, but this is out of scope for this series.

Link: https://lkml.kernel.org/r/88dc7340cce28249abf789f6e0c792c317df9ba5.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Breno Leitao <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 1956832 commit f129c31

2 files changed: 50 additions, 0 deletions

include/linux/kasan.h

Lines changed: 27 additions & 0 deletions
@@ -212,6 +212,29 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+				  unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+						       unsigned int order)
+{
+	if (kasan_enabled())
+		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+	return true;
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
@@ -326,6 +349,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+	return true;
+}
 static inline bool kasan_mempool_poison_object(void *ptr)
 {
 	return true;
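
For illustration, here is a minimal sketch of how a page-caching subsystem such as mempool might call the new hook when stashing a free page for later reuse. This caller is not part of the commit; the function name and the free-list parameter are assumptions made for the example.

#include <linux/kasan.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical caller: stash a page for reuse only if KASAN allows it. */
static void cache_stash_page(struct list_head *free_pages,
			     struct page *page, unsigned int order)
{
	/*
	 * kasan_mempool_poison_pages() checks for double-free and
	 * invalid-free bugs and poisons the page's memory. A false
	 * return means a bug was detected and reported, so the page
	 * should not be recycled.
	 */
	if (!kasan_mempool_poison_pages(page, order))
		return;

	list_add(&page->lru, free_pages);
}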

mm/kasan/common.c

Lines changed: 23 additions & 0 deletions
@@ -426,6 +426,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+				  unsigned long ip)
+{
+	unsigned long *ptr;
+
+	if (unlikely(PageHighMem(page)))
+		return true;
+
+	/* Bail out if allocation was excluded due to sampling. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
+		return true;
+
+	ptr = page_address(page);
+
+	if (check_page_allocation(ptr, ip))
+		return false;
+
+	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
+
+	return true;
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;
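
The check_page_allocation() call is what gives the hook its double-free detection: poisoning the same page twice finds already-poisoned memory and reports a bug instead of silently re-poisoning it. A minimal sketch of the resulting behavior under generic KASAN (illustrative, not taken from the patch):

struct page *page = alloc_pages(GFP_KERNEL, order);

/* First call: the page passes the check, is poisoned, and true is returned. */
WARN_ON(!kasan_mempool_poison_pages(page, order));

/*
 * Second call on the same page: check_page_allocation() finds the
 * already-poisoned memory, reports a double-free, and the hook returns
 * false so the caller can drop the page instead of caching it.
 */
WARN_ON(kasan_mempool_poison_pages(page, order));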
