Skip to content

Commit 47ebd03

Browse files
ramosian-glider authored and akpm00 committed
mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
As reported by Dipanjan Das, when KMSAN is used together with kernel fault injection (or, generally, even without the latter), calls to kcalloc() or __vmap_pages_range_noflush() may fail, leaving the metadata mappings for the virtual mapping in an inconsistent state. When these metadata mappings are accessed later, the kernel crashes. To address the problem, we return a non-zero error code from kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping failure inside it, and make vmap_pages_range_noflush() return an error if KMSAN fails to allocate the metadata. This patch also removes KMSAN_WARN_ON() from vmap_pages_range_noflush(), as these allocation failures are not fatal anymore. Link: https://lkml.kernel.org/r/[email protected] Fixes: b073d7f ("mm: kmsan: maintain KMSAN metadata for page operations") Signed-off-by: Alexander Potapenko <[email protected]> Reported-by: Dipanjan Das <[email protected]> Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/ Reviewed-by: Marco Elver <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Uladzislau Rezki (Sony) <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent a101482 commit 47ebd03

File tree

3 files changed

+34
-19
lines changed

3 files changed

+34
-19
lines changed

include/linux/kmsan.h

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
134134
* @page_shift: page_shift passed to vmap_range_noflush().
135135
*
136136
* KMSAN maps shadow and origin pages of @pages into contiguous ranges in
137-
* vmalloc metadata address range.
137+
* vmalloc metadata address range. Returns 0 on success, callers must check
138+
* for non-zero return value.
138139
*/
139-
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
140-
pgprot_t prot, struct page **pages,
141-
unsigned int page_shift);
140+
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
141+
pgprot_t prot, struct page **pages,
142+
unsigned int page_shift);
142143

143144
/**
144145
* kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -281,12 +282,13 @@ static inline void kmsan_kfree_large(const void *ptr)
281282
{
282283
}
283284

284-
static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
285-
unsigned long end,
286-
pgprot_t prot,
287-
struct page **pages,
288-
unsigned int page_shift)
285+
static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
286+
unsigned long end,
287+
pgprot_t prot,
288+
struct page **pages,
289+
unsigned int page_shift)
289290
{
291+
return 0;
290292
}
291293

292294
static inline void kmsan_vunmap_range_noflush(unsigned long start,

mm/kmsan/shadow.c

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
216216
kmsan_leave_runtime();
217217
}
218218

219-
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
220-
pgprot_t prot, struct page **pages,
221-
unsigned int page_shift)
219+
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
220+
pgprot_t prot, struct page **pages,
221+
unsigned int page_shift)
222222
{
223223
unsigned long shadow_start, origin_start, shadow_end, origin_end;
224224
struct page **s_pages, **o_pages;
225-
int nr, mapped;
225+
int nr, mapped, err = 0;
226226

227227
if (!kmsan_enabled)
228-
return;
228+
return 0;
229229

230230
shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
231231
shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
232232
if (!shadow_start)
233-
return;
233+
return 0;
234234

235235
nr = (end - start) / PAGE_SIZE;
236236
s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
237237
o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
238-
if (!s_pages || !o_pages)
238+
if (!s_pages || !o_pages) {
239+
err = -ENOMEM;
239240
goto ret;
241+
}
240242
for (int i = 0; i < nr; i++) {
241243
s_pages[i] = shadow_page_for(pages[i]);
242244
o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
249251
kmsan_enter_runtime();
250252
mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
251253
s_pages, page_shift);
252-
KMSAN_WARN_ON(mapped);
254+
if (mapped) {
255+
err = mapped;
256+
goto ret;
257+
}
253258
mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
254259
o_pages, page_shift);
255-
KMSAN_WARN_ON(mapped);
260+
if (mapped) {
261+
err = mapped;
262+
goto ret;
263+
}
256264
kmsan_leave_runtime();
257265
flush_tlb_kernel_range(shadow_start, shadow_end);
258266
flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
262270
ret:
263271
kfree(s_pages);
264272
kfree(o_pages);
273+
return err;
265274
}
266275

267276
/* Allocate metadata for pages allocated at boot time. */

mm/vmalloc.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
605605
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
606606
pgprot_t prot, struct page **pages, unsigned int page_shift)
607607
{
608-
kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
608+
int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
609+
page_shift);
610+
611+
if (ret)
612+
return ret;
609613
return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
610614
}
611615

0 commit comments

Comments (0)