Skip to content

Commit a3d0b7a

Browse files
author
Alexander Gordeev
committed
Merge branch 'uaccess-key' into features
Heiko Carstens says: =================== A rather large series which is supposed to fix the crash below[1], which was seen when running the memop kernel kvm selftest. The problem is that cmpxchg_user_key() is executing code with a non-default key. If a system is IPL'ed with "LOAD NORMAL", and in addition the previous system used storage keys where the fetch-protection bit is set for some pages, and the cmpxchg_user_key() is located within such a page, a protection exception will happen when executing such code. The idea of this series is to register all code locations running with a non-default key at compile time. All functions, which run with a non-default key, then must explicitly call an init function which initializes the storage key of all pages containing such code locations with the default key, which prevents such protection exceptions. Furthermore all functions containing code which may be executed with a non-default access key must be marked with __kprobes to prevent out-of-line execution of any instruction of such functions, which would result in the same problem. By default the kernel will not issue any storage key changing instructions like before, which will preserve the keyless-subset mode optimizations in hosts. Other possible implementations which I discarded: - Moving the code to its own section. This would require an s390 specific change to modpost.c, which complains about section mismatches (EX_TABLE entries in non-default text section). No other architecture has something similar, so let's keep this architecture specific hack local. - Just apply the default storage key to the whole kprobes text section. However this would add special s390 semantics to the kprobes text section, which no other architecture has. History has shown that such hacks backfire sooner or later. Furthermore, and to keep this whole stuff quite simple, this only works for code locations in core kernel code, not within modules. 
After this series there is no module code left with such code, and as of now I don't see any new kernel code which runs with a non-default access key. Note: the original crash can be reproduced by replacing page_set_storage_key(real, PAGE_DEFAULT_KEY, 1); with page_set_storage_key(real, 8, 1); in arch/s390/kernel/skey.c:__skey_regions_initialize() And then run tools/testing/selftests/kvm/s390/memop from the kernel selftests. [1]: Unable to handle kernel pointer dereference in virtual kernel address space Failing address: 0000000000000000 TEID: 000000000000080b Fault in home space mode while using kernel ASCE. AS:0000000002528007 R3:00000001ffffc007 S:00000001ffffb801 P:000000000000013d Oops: 0004 ilc:1 [#1]SMP Modules linked in: CPU: 3 UID: 0 PID: 791 Comm: memop Not tainted 6.16.0-rc1-00006-g3b568201d0a6-dirty #11 NONE Hardware name: IBM 3931 A01 704 (z/VM 7.4.0) Krnl PSW : 0794f00180000000 000003ffe0f4d91e (__cmpxchg_user_key1+0xbe/0x190) R:0 T:1 IO:1 EX:1 Key:9 M:1 W:0 P:0 AS:3 CC:3 PM:0 RI:0 EA:3 Krnl GPRS: 070003ffdfbf6af0 0000000000070000 0000000095b5a300 0000000000000000 00000000f1000000 0000000000000000 0000000000000090 0000000000000000 0000000000000040 0000000000000018 000003ff9b23d000 0000037fe0ef7bd8 000003ffdfbf7500 00000000962e4000 0000037f00ffffff 0000037fe0ef7aa0 Krnl Code: 000003ffe0f4d912: ad03f0a0 stosm 160(%r15),3 000003ffe0f4d916: a7780000 lhi %r7,0 #000003ffe0f4d91a: b20a6000 spka 0(%r6) >000003ffe0f4d91e: b2790100 sacf 256 000003ffe0f4d922: a56f0080 llill %r6,128 000003ffe0f4d926: 5810a000 l %r1,0(%r10) 000003ffe0f4d92a: 141e nr %r1,%r14 000003ffe0f4d92c: c0e7ffffffff xilf %r14,4294967295 Call Trace: [<000003ffe0f4d91e>] __cmpxchg_user_key1+0xbe/0x190 [<000003ffe0189c6e>] cmpxchg_guest_abs_with_key+0x2fe/0x370 [<000003ffe016d28e>] kvm_s390_vm_mem_op_cmpxchg+0x17e/0x350 [<000003ffe0173284>] kvm_arch_vm_ioctl+0x354/0x6f0 [<000003ffe015fedc>] kvm_vm_ioctl+0x2cc/0x6e0 [<000003ffe05348ae>] vfs_ioctl+0x2e/0x70 [<000003ffe0535e70>] 
__s390x_sys_ioctl+0xe0/0x100 [<000003ffe0f40f06>] __do_syscall+0x136/0x340 [<000003ffe0f4cb2e>] system_call+0x6e/0x90 Last Breaking-Event-Address: [<000003ffe0f4d896>] __cmpxchg_user_key1+0x36/0x190 =================== Signed-off-by: Alexander Gordeev <[email protected]>
2 parents fbb3bdf + 82d6229 commit a3d0b7a

File tree

7 files changed

+312
-187
lines changed

7 files changed

+312
-187
lines changed

arch/s390/include/asm/page.h

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -130,11 +130,19 @@ typedef pte_t *pgtable_t;
130130
static inline void page_set_storage_key(unsigned long addr,
131131
unsigned char skey, int mapped)
132132
{
133-
if (!mapped)
134-
asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
135-
: : "d" (skey), "a" (addr));
136-
else
137-
asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
133+
if (!mapped) {
134+
asm volatile(
135+
" .insn rrf,0xb22b0000,%[skey],%[addr],8,0"
136+
:
137+
: [skey] "d" (skey), [addr] "a" (addr)
138+
: "memory");
139+
} else {
140+
asm volatile(
141+
" sske %[skey],%[addr]"
142+
:
143+
: [skey] "d" (skey), [addr] "a" (addr)
144+
: "memory");
145+
}
138146
}
139147

140148
static inline unsigned char page_get_storage_key(unsigned long addr)

arch/s390/include/asm/skey.h

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef __ASM_SKEY_H
3+
#define __ASM_SKEY_H
4+
5+
#include <asm/rwonce.h>
6+
7+
struct skey_region {
8+
unsigned long start;
9+
unsigned long end;
10+
};
11+
12+
#define SKEY_REGION(_start, _end) \
13+
stringify_in_c(.section .skey_region,"a";) \
14+
stringify_in_c(.balign 8;) \
15+
stringify_in_c(.quad (_start);) \
16+
stringify_in_c(.quad (_end);) \
17+
stringify_in_c(.previous)
18+
19+
extern int skey_regions_initialized;
20+
extern struct skey_region __skey_region_start[];
21+
extern struct skey_region __skey_region_end[];
22+
23+
void __skey_regions_initialize(void);
24+
25+
static inline void skey_regions_initialize(void)
26+
{
27+
if (READ_ONCE(skey_regions_initialized))
28+
return;
29+
__skey_regions_initialize();
30+
}
31+
32+
#endif /* __ASM_SKEY_H */

arch/s390/include/asm/uaccess.h

Lines changed: 23 additions & 181 deletions
Original file line numberDiff line numberDiff line change
@@ -473,188 +473,30 @@ do { \
473473

474474
void __cmpxchg_user_key_called_with_bad_pointer(void);
475475

476-
#define CMPXCHG_USER_KEY_MAX_LOOPS 128
477-
478-
static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
479-
__uint128_t old, __uint128_t new,
480-
unsigned long key, int size)
476+
int __cmpxchg_user_key1(unsigned long address, unsigned char *uval,
477+
unsigned char old, unsigned char new, unsigned long key);
478+
int __cmpxchg_user_key2(unsigned long address, unsigned short *uval,
479+
unsigned short old, unsigned short new, unsigned long key);
480+
int __cmpxchg_user_key4(unsigned long address, unsigned int *uval,
481+
unsigned int old, unsigned int new, unsigned long key);
482+
int __cmpxchg_user_key8(unsigned long address, unsigned long *uval,
483+
unsigned long old, unsigned long new, unsigned long key);
484+
int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval,
485+
__uint128_t old, __uint128_t new, unsigned long key);
486+
487+
static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval,
488+
__uint128_t old, __uint128_t new,
489+
unsigned long key, int size)
481490
{
482-
bool sacf_flag;
483-
int rc = 0;
484-
485491
switch (size) {
486-
case 1: {
487-
unsigned int prev, shift, mask, _old, _new;
488-
unsigned long count;
489-
490-
shift = (3 ^ (address & 3)) << 3;
491-
address ^= address & 3;
492-
_old = ((unsigned int)old & 0xff) << shift;
493-
_new = ((unsigned int)new & 0xff) << shift;
494-
mask = ~(0xff << shift);
495-
sacf_flag = enable_sacf_uaccess();
496-
asm_inline volatile(
497-
" spka 0(%[key])\n"
498-
" sacf 256\n"
499-
" llill %[count],%[max_loops]\n"
500-
"0: l %[prev],%[address]\n"
501-
"1: nr %[prev],%[mask]\n"
502-
" xilf %[mask],0xffffffff\n"
503-
" or %[new],%[prev]\n"
504-
" or %[prev],%[tmp]\n"
505-
"2: lr %[tmp],%[prev]\n"
506-
"3: cs %[prev],%[new],%[address]\n"
507-
"4: jnl 5f\n"
508-
" xr %[tmp],%[prev]\n"
509-
" xr %[new],%[tmp]\n"
510-
" nr %[tmp],%[mask]\n"
511-
" jnz 5f\n"
512-
" brct %[count],2b\n"
513-
"5: sacf 768\n"
514-
" spka %[default_key]\n"
515-
EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
516-
EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
517-
EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
518-
EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
519-
: [rc] "+&d" (rc),
520-
[prev] "=&d" (prev),
521-
[address] "+Q" (*(int *)address),
522-
[tmp] "+&d" (_old),
523-
[new] "+&d" (_new),
524-
[mask] "+&d" (mask),
525-
[count] "=a" (count)
526-
: [key] "%[count]" (key << 4),
527-
[default_key] "J" (PAGE_DEFAULT_KEY),
528-
[max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
529-
: "memory", "cc");
530-
disable_sacf_uaccess(sacf_flag);
531-
*(unsigned char *)uval = prev >> shift;
532-
if (!count)
533-
rc = -EAGAIN;
534-
return rc;
535-
}
536-
case 2: {
537-
unsigned int prev, shift, mask, _old, _new;
538-
unsigned long count;
539-
540-
shift = (2 ^ (address & 2)) << 3;
541-
address ^= address & 2;
542-
_old = ((unsigned int)old & 0xffff) << shift;
543-
_new = ((unsigned int)new & 0xffff) << shift;
544-
mask = ~(0xffff << shift);
545-
sacf_flag = enable_sacf_uaccess();
546-
asm_inline volatile(
547-
" spka 0(%[key])\n"
548-
" sacf 256\n"
549-
" llill %[count],%[max_loops]\n"
550-
"0: l %[prev],%[address]\n"
551-
"1: nr %[prev],%[mask]\n"
552-
" xilf %[mask],0xffffffff\n"
553-
" or %[new],%[prev]\n"
554-
" or %[prev],%[tmp]\n"
555-
"2: lr %[tmp],%[prev]\n"
556-
"3: cs %[prev],%[new],%[address]\n"
557-
"4: jnl 5f\n"
558-
" xr %[tmp],%[prev]\n"
559-
" xr %[new],%[tmp]\n"
560-
" nr %[tmp],%[mask]\n"
561-
" jnz 5f\n"
562-
" brct %[count],2b\n"
563-
"5: sacf 768\n"
564-
" spka %[default_key]\n"
565-
EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
566-
EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
567-
EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
568-
EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
569-
: [rc] "+&d" (rc),
570-
[prev] "=&d" (prev),
571-
[address] "+Q" (*(int *)address),
572-
[tmp] "+&d" (_old),
573-
[new] "+&d" (_new),
574-
[mask] "+&d" (mask),
575-
[count] "=a" (count)
576-
: [key] "%[count]" (key << 4),
577-
[default_key] "J" (PAGE_DEFAULT_KEY),
578-
[max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
579-
: "memory", "cc");
580-
disable_sacf_uaccess(sacf_flag);
581-
*(unsigned short *)uval = prev >> shift;
582-
if (!count)
583-
rc = -EAGAIN;
584-
return rc;
585-
}
586-
case 4: {
587-
unsigned int prev = old;
588-
589-
sacf_flag = enable_sacf_uaccess();
590-
asm_inline volatile(
591-
" spka 0(%[key])\n"
592-
" sacf 256\n"
593-
"0: cs %[prev],%[new],%[address]\n"
594-
"1: sacf 768\n"
595-
" spka %[default_key]\n"
596-
EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
597-
EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
598-
: [rc] "+&d" (rc),
599-
[prev] "+&d" (prev),
600-
[address] "+Q" (*(int *)address)
601-
: [new] "d" ((unsigned int)new),
602-
[key] "a" (key << 4),
603-
[default_key] "J" (PAGE_DEFAULT_KEY)
604-
: "memory", "cc");
605-
disable_sacf_uaccess(sacf_flag);
606-
*(unsigned int *)uval = prev;
607-
return rc;
608-
}
609-
case 8: {
610-
unsigned long prev = old;
611-
612-
sacf_flag = enable_sacf_uaccess();
613-
asm_inline volatile(
614-
" spka 0(%[key])\n"
615-
" sacf 256\n"
616-
"0: csg %[prev],%[new],%[address]\n"
617-
"1: sacf 768\n"
618-
" spka %[default_key]\n"
619-
EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
620-
EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
621-
: [rc] "+&d" (rc),
622-
[prev] "+&d" (prev),
623-
[address] "+QS" (*(long *)address)
624-
: [new] "d" ((unsigned long)new),
625-
[key] "a" (key << 4),
626-
[default_key] "J" (PAGE_DEFAULT_KEY)
627-
: "memory", "cc");
628-
disable_sacf_uaccess(sacf_flag);
629-
*(unsigned long *)uval = prev;
630-
return rc;
631-
}
632-
case 16: {
633-
__uint128_t prev = old;
634-
635-
sacf_flag = enable_sacf_uaccess();
636-
asm_inline volatile(
637-
" spka 0(%[key])\n"
638-
" sacf 256\n"
639-
"0: cdsg %[prev],%[new],%[address]\n"
640-
"1: sacf 768\n"
641-
" spka %[default_key]\n"
642-
EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
643-
EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
644-
: [rc] "+&d" (rc),
645-
[prev] "+&d" (prev),
646-
[address] "+QS" (*(__int128_t *)address)
647-
: [new] "d" (new),
648-
[key] "a" (key << 4),
649-
[default_key] "J" (PAGE_DEFAULT_KEY)
650-
: "memory", "cc");
651-
disable_sacf_uaccess(sacf_flag);
652-
*(__uint128_t *)uval = prev;
653-
return rc;
654-
}
492+
case 1: return __cmpxchg_user_key1(address, uval, old, new, key);
493+
case 2: return __cmpxchg_user_key2(address, uval, old, new, key);
494+
case 4: return __cmpxchg_user_key4(address, uval, old, new, key);
495+
case 8: return __cmpxchg_user_key8(address, uval, old, new, key);
496+
case 16: return __cmpxchg_user_key16(address, uval, old, new, key);
497+
default: __cmpxchg_user_key_called_with_bad_pointer();
655498
}
656-
__cmpxchg_user_key_called_with_bad_pointer();
657-
return rc;
499+
return 0;
658500
}
659501

660502
/**
@@ -686,8 +528,8 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
686528
BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \
687529
might_fault(); \
688530
__chk_user_ptr(__ptr); \
689-
__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
690-
(old), (new), (key), sizeof(*(__ptr))); \
531+
_cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \
532+
(old), (new), (key), sizeof(*(__ptr))); \
691533
})
692534

693535
#endif /* __S390_UACCESS_H */

arch/s390/kernel/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
4141
obj-y += debug.o irq.o ipl.o dis.o vdso.o cpufeature.o
4242
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
4343
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
44-
obj-y += entry.o reipl.o kdebugfs.o alternative.o
44+
obj-y += entry.o reipl.o kdebugfs.o alternative.o skey.o
4545
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
4646
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o
4747
obj-y += diag/

arch/s390/kernel/skey.c

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
#include <asm/rwonce.h>
4+
#include <asm/page.h>
5+
#include <asm/skey.h>
6+
7+
int skey_regions_initialized;
8+
9+
static inline unsigned long load_real_address(unsigned long address)
10+
{
11+
unsigned long real;
12+
13+
asm volatile(
14+
" lra %[real],0(%[address])\n"
15+
: [real] "=d" (real)
16+
: [address] "a" (address)
17+
: "cc");
18+
return real;
19+
}
20+
21+
/*
22+
* Initialize storage keys of registered memory regions with the
23+
* default key. This is useful for code which is executed with a
24+
* non-default access key.
25+
*/
26+
void __skey_regions_initialize(void)
27+
{
28+
unsigned long address, real;
29+
struct skey_region *r, *end;
30+
31+
r = __skey_region_start;
32+
end = __skey_region_end;
33+
while (r < end) {
34+
address = r->start & PAGE_MASK;
35+
do {
36+
real = load_real_address(address);
37+
page_set_storage_key(real, PAGE_DEFAULT_KEY, 1);
38+
address += PAGE_SIZE;
39+
} while (address < r->end);
40+
r++;
41+
}
42+
/*
43+
* Make sure storage keys are initialized before
44+
* skey_regions_initialized is changed.
45+
*/
46+
barrier();
47+
WRITE_ONCE(skey_regions_initialized, 1);
48+
}

arch/s390/kernel/vmlinux.lds.S

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,13 @@ SECTIONS
7171
. = ALIGN(PAGE_SIZE);
7272
__end_ro_after_init = .;
7373

74+
. = ALIGN(8);
75+
.skey_region_table : {
76+
__skey_region_start = .;
77+
KEEP(*(.skey_region))
78+
__skey_region_end = .;
79+
}
80+
7481
.data.rel.ro : {
7582
*(.data.rel.ro .data.rel.ro.*)
7683
}

0 commit comments

Comments
 (0)