Skip to content

Commit d9c2cf6

Browse files
committed
s390/kfence: fix page fault reporting
Baoquan He reported lots of KFENCE reports when /proc/kcore is read, e.g. with crash or even simpler with dd: BUG: KFENCE: invalid read in copy_from_kernel_nofault+0x5e/0x120 Invalid read at 0x00000000f4f5149f: copy_from_kernel_nofault+0x5e/0x120 read_kcore+0x6b2/0x870 proc_reg_read+0x9a/0xf0 vfs_read+0x94/0x270 ksys_read+0x70/0x100 __do_syscall+0x1d0/0x200 system_call+0x82/0xb0 The reason for this is that read_kcore() simply reads memory that might have been unmapped by KFENCE with copy_from_kernel_nofault(). Any fault due to pages being unmapped by KFENCE would be handled gracefully by the fault handler (exception table fixup). However the s390 fault handler first reports the fault, and only afterwards would perform the exception table fixup. Most architectures have this in reversed order, which also avoids the false positive KFENCE reports when an unmapped page is accessed. Therefore change the s390 fault handler so it handles exception table fixups before KFENCE page faults are reported. Reported-by: Baoquan He <[email protected]> Tested-by: Baoquan He <[email protected]> Acked-by: Alexander Potapenko <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Heiko Carstens <[email protected]>
1 parent ad0faae commit d9c2cf6

File tree

1 file changed

+35
-14
lines changed

1 file changed

+35
-14
lines changed

arch/s390/mm/fault.c

Lines changed: 35 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,20 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
100100
return KERNEL_FAULT;
101101
}
102102

103+
static unsigned long get_fault_address(struct pt_regs *regs)
104+
{
105+
unsigned long trans_exc_code = regs->int_parm_long;
106+
107+
return trans_exc_code & __FAIL_ADDR_MASK;
108+
}
109+
110+
static bool fault_is_write(struct pt_regs *regs)
111+
{
112+
unsigned long trans_exc_code = regs->int_parm_long;
113+
114+
return (trans_exc_code & store_indication) == 0x400;
115+
}
116+
103117
static int bad_address(void *p)
104118
{
105119
unsigned long dummy;
@@ -232,15 +246,26 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
232246
(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
233247
}
234248

235-
static noinline void do_no_context(struct pt_regs *regs)
249+
static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
236250
{
251+
enum fault_type fault_type;
252+
unsigned long address;
253+
bool is_write;
254+
237255
if (fixup_exception(regs))
238256
return;
257+
fault_type = get_fault_type(regs);
258+
if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
259+
address = get_fault_address(regs);
260+
is_write = fault_is_write(regs);
261+
if (kfence_handle_page_fault(address, is_write, regs))
262+
return;
263+
}
239264
/*
240265
* Oops. The kernel tried to access some bad page. We'll have to
241266
* terminate things with extreme prejudice.
242267
*/
243-
if (get_fault_type(regs) == KERNEL_FAULT)
268+
if (fault_type == KERNEL_FAULT)
244269
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
245270
" in virtual kernel address space\n");
246271
else
@@ -259,7 +284,7 @@ static noinline void do_low_address(struct pt_regs *regs)
259284
die (regs, "Low-address protection");
260285
}
261286

262-
do_no_context(regs);
287+
do_no_context(regs, VM_FAULT_BADACCESS);
263288
}
264289

265290
static noinline void do_sigbus(struct pt_regs *regs)
@@ -290,28 +315,28 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
290315
fallthrough;
291316
case VM_FAULT_BADCONTEXT:
292317
case VM_FAULT_PFAULT:
293-
do_no_context(regs);
318+
do_no_context(regs, fault);
294319
break;
295320
case VM_FAULT_SIGNAL:
296321
if (!user_mode(regs))
297-
do_no_context(regs);
322+
do_no_context(regs, fault);
298323
break;
299324
default: /* fault & VM_FAULT_ERROR */
300325
if (fault & VM_FAULT_OOM) {
301326
if (!user_mode(regs))
302-
do_no_context(regs);
327+
do_no_context(regs, fault);
303328
else
304329
pagefault_out_of_memory();
305330
} else if (fault & VM_FAULT_SIGSEGV) {
306331
/* Kernel mode? Handle exceptions or die */
307332
if (!user_mode(regs))
308-
do_no_context(regs);
333+
do_no_context(regs, fault);
309334
else
310335
do_sigsegv(regs, SEGV_MAPERR);
311336
} else if (fault & VM_FAULT_SIGBUS) {
312337
/* Kernel mode? Handle exceptions or die */
313338
if (!user_mode(regs))
314-
do_no_context(regs);
339+
do_no_context(regs, fault);
315340
else
316341
do_sigbus(regs);
317342
} else
@@ -338,7 +363,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
338363
struct mm_struct *mm;
339364
struct vm_area_struct *vma;
340365
enum fault_type type;
341-
unsigned long trans_exc_code;
342366
unsigned long address;
343367
unsigned int flags;
344368
vm_fault_t fault;
@@ -355,9 +379,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
355379
return 0;
356380

357381
mm = tsk->mm;
358-
trans_exc_code = regs->int_parm_long;
359-
address = trans_exc_code & __FAIL_ADDR_MASK;
360-
is_write = (trans_exc_code & store_indication) == 0x400;
382+
address = get_fault_address(regs);
383+
is_write = fault_is_write(regs);
361384

362385
/*
363386
* Verify that the fault happened in user space, that
@@ -368,8 +391,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
368391
type = get_fault_type(regs);
369392
switch (type) {
370393
case KERNEL_FAULT:
371-
if (kfence_handle_page_fault(address, is_write, regs))
372-
return 0;
373394
goto out;
374395
case USER_FAULT:
375396
case GMAP_FAULT:

0 commit comments

Comments (0)