
Commit 3c50026

Coreforge authored and geerlingguy committed
some alignment trapping, still wip
1 parent 88687a9 commit 3c50026

File tree

3 files changed: +322 -1 lines changed


arch/arm64/include/asm/exception.h

Lines changed: 1 addition & 0 deletions
@@ -68,6 +68,7 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
 void do_el0_cp15(unsigned long esr, struct pt_regs *regs);
 int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
+int do_alignment_fixup(unsigned long addr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_el0_fpac(struct pt_regs *regs, unsigned long esr);

arch/arm64/kernel/compat_alignment.c

Lines changed: 312 additions & 1 deletion
@@ -318,7 +318,7 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
 	int thumb2_32b = 0;
 
 	instrptr = instruction_pointer(regs);
-
+	printk("Alignment fixup\n");
 	if (compat_thumb_mode(regs)) {
 		__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
 		u16 tinstr, tinst2;
@@ -381,3 +381,314 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
 
 	return 0;
 }
+
+// arm64#
+
+/*
+ * Happens with The Long Dark
+ *
+ * [ 6012.660803] Faulting instruction: 0x3d800020
+ * [ 6012.660813] Load/Store: op0 0x3 op1 0x1 op2 0x3 op3 0x0 op4 0x0
+ */
+
+struct fixupDescription{
+	void* addr;
+
+	u64 data1;
+	u64 data1_simd;
+	u64 data2;
+	u64 data2_simd;
+
+	int Rs; // used for atomics (which don't get handled atomically)
+
+	int simd; // whether or not this is a vector instruction
+	int load; // 1 if it's a load, 0 if it's a store
+	int pair; // 1 if it's a load/store pair instruction
+	int width; // width of the access in bits
+};
+
+static int alignment_get_arm64(struct pt_regs *regs, __le32 __user *ip, u32 *inst)
+{
+	__le32 instr = 0;
+	int fault;
+
+	fault = get_user(instr, ip);
+	if (fault)
+		return fault;
+
+	*inst = __le32_to_cpu(instr);
+	return 0;
+}
+
+/*int ldpstp_offset_fixup(u32 instr, struct pt_regs *regs){
+	uint8_t load = (instr >> 22) & 1;
+	uint8_t simd = (instr >> 26) & 1;
+	uint16_t imm7 = (instr >> 15) & 0x7f;
+	uint8_t Rt2 = (instr >> 10) & 0x1f;
+	uint8_t Rn = (instr >> 5) & 0x1f;
+	uint8_t Rt = instr & 0x1f;
+
+	int16_t imm = 0xffff & imm7;
+	printk("Variant: 0x%x Load: %x SIMD: %x IMM: 0x%x Rt: 0x%x Rt2: 0x%x Rn: 0x%x\n", ((instr >> 30) & 3), load, simd, imm, Rt, Rt2, Rn);
+	if(((instr >> 30) & 3) == 2){
+		// 64bit
+		if(!load){
+			if(!simd){
+				// 64bit store
+				u64 val1, val2;
+				val1 = regs->regs[Rt];
+				val2 = regs->regs[Rt2];
+				u64 addr = regs->regs[Rn] + imm;
+				printk("STP 64bit storing 0x%llx 0x%llx at 0x%llx\n", val1, val2, addr);
+				// for the first reg. Byte by byte to avoid any alignment issues
+				for(int i = 0; i < 8; i++){
+					uint8_t v = (val1 >> (i*8)) & 0xff;
+					put_user(v, (uint8_t __user *)addr);
+					addr++;
+				}
+				// second reg
+				for(int i = 0; i < 8; i++){
+					uint8_t v = (val2 >> (i*8)) & 0xff;
+					put_user(v, (uint8_t __user *)addr);
+					addr++;
+				}
+				arm64_skip_faulting_instruction(regs, 4);
+			}
+		}
+	}
+	return 0;
+}*/
+
+int do_ls_fixup(u32 instr, struct pt_regs *regs, struct fixupDescription* desc){
+	int r;
+	if(!desc->load){
+		uint8_t* addr = desc->addr;
+		int bcount = desc->width / 8; // since the field stores the width in bits. Honestly, there's no particular reason for that
+		printk("Storing %d bytes (pair: %d) to 0x%llx\n", bcount, desc->pair, (u64)desc->addr);
+		for(int i = 0; i < bcount; i++){
+			if((r = put_user(desc->data1 & 0xff, (uint8_t __user *)addr)))
+				return r;
+			desc->data1 >>= 8;
+			addr++;
+		}
+
+		if(desc->pair){
+			for(int i = 0; i < bcount; i++){
+				if((r = put_user(desc->data2 & 0xff, (uint8_t __user *)addr)))
+					return r;
+				desc->data2 >>= 8;
+				addr++;
+			}
+		}
+		arm64_skip_faulting_instruction(regs, 4);
+	} else {
+		printk("Loading is currently not implemented (addr 0x%llx)\n", (u64)desc->addr);
+		return -1;
+	}
+	return 0;
+}
+
+int ls_cas_fixup(u32 instr, struct pt_regs *regs, struct fixupDescription* desc){
+	uint8_t size = (instr >> 30) & 3;
+	uint8_t load = (instr >> 22) & 1; // acquire semantics, has no effect here, since it's not atomic anymore
+	uint8_t Rs = (instr >> 16) & 0x1f;
+	uint8_t Rt2 = (instr >> 10) & 0x1f;
+	uint8_t Rn = (instr >> 5) & 0x1f;
+	uint8_t Rt = instr & 0x1f;
+
+	uint8_t o0 = (instr >> 15) & 1; // L, release semantics, has no effect here, since it's not atomic anymore
+
+	if(Rt2 != 0x1f){
+		return -1;
+	}
+
+	switch(size){
+	case 0:
+		desc->width = 8;
+		break;
+	case 1:
+		desc->width = 16;
+		break;
+	case 2:
+		desc->width = 32;
+		break;
+	case 3:
+		desc->width = 64;
+		break;
+	}
+
+	desc->addr = (void*)regs->regs[Rn];
+	desc->data1 = regs->regs[Rt];
+
+	// nearly everything from here on could be moved into another function if needed
+	u64 cmpmask = (desc->width == 64) ? ~0ULL : (1ULL << desc->width) - 1; // 1 << 64 would be undefined
+	u64 cmpval = regs->regs[Rs] & cmpmask;
+
+	u64 readval = 0;
+	int bcount = desc->width / 8;
+	u64 addr = (u64)desc->addr;
+	int r;
+	uint8_t tmp;
+
+	printk("Atomic CAS not being done atomically at 0x%llx, size %d\n", (u64)desc->addr, desc->width);
+
+	for(int i = 0; i < bcount; i++){
+		if((r = get_user(tmp, (uint8_t __user *)addr)))
+			return r;
+		// assemble little-endian, matching the byte-wise stores; maybe this could be read directly into regs->regs[Rs]
+		readval |= ((u64)tmp) << (i * 8);
+		addr++;
+	}
+
+	if((readval & cmpmask) == cmpval){
+		// swap
+		addr = (u64)desc->addr;
+
+		for(int i = 0; i < bcount; i++){
+			if((r = put_user(desc->data1 & 0xff, (uint8_t __user *)addr)))
+				return r;
+			desc->data1 >>= 8;
+			addr++;
+		}
+
+		regs->regs[Rs] = readval;
+	}
+
+	arm64_skip_faulting_instruction(regs, 4);
+
+	return 0;
+}
+
+int ls_pair_fixup(u32 instr, struct pt_regs *regs, struct fixupDescription* desc){
+	uint8_t op2;
+	uint8_t opc;
+	op2 = (instr >> 23) & 3;
+	opc = (instr >> 30) & 3;
+
+	uint8_t load = (instr >> 22) & 1;
+	uint8_t simd = (instr >> 26) & 1;
+	uint16_t imm7 = (instr >> 15) & 0x7f;
+	uint8_t Rt2 = (instr >> 10) & 0x1f;
+	uint8_t Rn = (instr >> 5) & 0x1f;
+	uint8_t Rt = instr & 0x1f;
+
+	int16_t imm = sign_extend32(imm7, 6); // imm7 is a signed 7-bit immediate
+
+	desc->load = load;
+	desc->simd = simd;
+
+	// opc controls the width
+	switch(opc){
+	case 0:
+		desc->width = 32;
+		imm <<= 2;
+		break;
+	case 2:
+		desc->width = 64;
+		imm <<= 3;
+		break;
+	default:
+		return -1;
+	}
+
+	// op2 controls the indexing
+	switch(op2){
+	case 2:
+		// offset
+		desc->addr = (void*)(regs->regs[Rn] + imm);
+		break;
+	default:
+		return -1;
+	}
+	desc->data1 = regs->regs[Rt];
+	desc->data2 = regs->regs[Rt2];
+
+	return do_ls_fixup(instr, regs, desc);
+}
+
+int ls_reg_unsigned_imm(u32 instr, struct pt_regs *regs, struct fixupDescription* desc){
+	uint8_t size = (instr >> 30) & 3;
+	uint8_t simd = (instr >> 26) & 1;
+	uint8_t opc = (instr >> 22) & 3;
+
+	switch(size){
+	case 0:
+		desc->width = 8;
+		break;
+	case 1:
+		desc->width = 16;
+		break;
+	case 2:
+		desc->width = 32;
+		break;
+	case 3:
+		desc->width = 64;
+		break;
+	}
+	return 0;
+}
+
+int ls_fixup(u32 instr, struct pt_regs *regs, struct fixupDescription* desc){
+	uint8_t op0;
+	uint8_t op1;
+	uint8_t op2;
+	uint8_t op3;
+	uint8_t op4;
+
+	op0 = (instr >> 28) & 0xf;
+	op1 = (instr >> 26) & 1;
+	op2 = (instr >> 23) & 3;
+	op3 = (instr >> 16) & 0x3f;
+	op4 = (instr >> 10) & 3;
+	printk("Load/Store: op0 0x%x op1 0x%x op2 0x%x op3 0x%x op4 0x%x\n", op0, op1, op2, op3, op4);
+	if((op0 & 3) == 2){
+		desc->pair = 1;
+		return ls_pair_fixup(instr, regs, desc);
+	}
+	if((op0 & 3) == 0 && op1 == 0 && op2 == 1 && (op3 & 0x20) == 0x20){
+		// compare and swap
+		return ls_cas_fixup(instr, regs, desc);
+	}
+	if((op0 & 3) == 3 && (op2 & 3) == 3){
+		// load/store unsigned immediate
+		desc->pair = 0;
+	}
+	if((op0 & 3) == 2 && (op2 == 2)){
+		// Load/store pair offset
+		//ldpstp_offset_fixup(instr, regs);
+		return ls_reg_unsigned_imm(instr, regs, desc);
+	}
+	return 0;
+}
+
+int do_alignment_fixup(unsigned long addr, struct pt_regs *regs){
+	unsigned long long instrptr;
+	u32 instr = 0;
+
+	instrptr = instruction_pointer(regs);
+	printk("Alignment fixup\n");
+
+	if (alignment_get_arm64(regs, (__le32 __user *)instrptr, &instr)){
+		printk("Failed to get aarch64 instruction\n");
+		return 1;
+	}
+	printk("Faulting instruction: 0x%x\n", instr);
+	/*
+	 * List of seen faults: 020c00a9 (0xa9000c02) stp x2, x3, [x0]
+	 */
+
+	uint8_t op0;
+	struct fixupDescription desc = {0};
+
+	op0 = ((instr & 0x1E000000) >> 25);
+	if((op0 & 5) == 0x4){
+		printk("Load/Store\n");
+		return ls_fixup(instr, regs, &desc);
+	} else {
+		printk("Not handling instruction with op0 0x%x\n", op0);
+	}
+	return -1;
+}
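
Note: the op-field values in the dmesg line quoted in the comment above can be reproduced with the same shifts and masks the new code uses (do_alignment_fixup() for the top-level op0 class check, ls_fixup() for the fields it prints). A minimal userspace decoder sketch, not part of the commit:

#include <stdint.h>
#include <stdio.h>

/* Same shifts/masks as the patch: the "op0..op4" fields printed by
 * ls_fixup(), plus the top-level op0 that do_alignment_fixup() uses
 * to recognise the load/store class ((op0 & 5) == 4). */
static void decode(uint32_t instr)
{
	uint8_t top_op0 = (instr >> 25) & 0xf;	/* bits 28:25, do_alignment_fixup() */
	uint8_t op0 = (instr >> 28) & 0xf;	/* bits 31:28, ls_fixup() */
	uint8_t op1 = (instr >> 26) & 1;
	uint8_t op2 = (instr >> 23) & 3;
	uint8_t op3 = (instr >> 16) & 0x3f;
	uint8_t op4 = (instr >> 10) & 3;

	printf("0x%08x: op0 0x%x op1 0x%x op2 0x%x op3 0x%x op4 0x%x, load/store class: %s\n",
	       instr, op0, op1, op2, op3, op4,
	       ((top_op0 & 5) == 4) ? "yes" : "no");
}

int main(void)
{
	decode(0x3d800020);	/* the fault logged with The Long Dark */
	decode(0xa9000c02);	/* "stp x2, x3, [x0]" from the seen-faults comment */
	return 0;
}

For 0x3d800020 this should print op0 0x3 op1 0x1 op2 0x3 op3 0x0 op4 0x0, matching the log line in the comment.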

arch/arm64/mm/fault.c

Lines changed: 9 additions & 0 deletions
@@ -25,6 +25,7 @@
 #include <linux/perf_event.h>
 #include <linux/preempt.h>
 #include <linux/hugetlb.h>
+#include <linux/nmi.h>
 
 #include <asm/acpi.h>
 #include <asm/bug.h>
@@ -674,6 +675,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		 * We had some memory, but were unable to successfully fix up
 		 * this page fault.
 		 */
+		printk("Page fault bus error\n");
 		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
 	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
 		unsigned int lsb;
@@ -716,9 +718,16 @@ static int __kprobes do_translation_fault(unsigned long far,
 static int do_alignment_fault(unsigned long far, unsigned long esr,
 			      struct pt_regs *regs)
 {
+	//printk("Alignment fault: fixup enabled?: %d, user mode: %d pstate: 0x%llx\n", IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS), compat_user_mode(regs), regs->pstate);
+	trigger_all_cpu_backtrace();
 	if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
 	    compat_user_mode(regs))
 		return do_compat_alignment_fixup(far, regs);
+
+	if(user_mode(regs)){
+		// aarch64 user mode
+		return do_alignment_fixup(far, regs);
+	}
 	do_bad_area(far, esr, regs);
 	return 0;
 }
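
Note: one way to exercise the new non-compat path is a deliberately misaligned compare-and-swap, since CAS instructions require natural alignment even on normal memory; the fault then reaches do_alignment_fault() and, with this patch, do_alignment_fixup()/ls_cas_fixup() for aarch64 user mode. A hedged test sketch (hypothetical cas_test.c; assumes the toolchain emits, or dispatches at runtime to, an LSE CAS instruction):

/* Hypothetical userspace trigger, not part of the commit: a CAS on a
 * deliberately misaligned address. Build with something like:
 *   gcc -march=armv8.1-a+lse -O2 -o cas_test cas_test.c
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	_Alignas(8) uint8_t buf[16];
	memset(buf, 0, sizeof(buf));

	uint64_t *p = (uint64_t *)(buf + 1);	/* misaligned on purpose */
	uint64_t expected = 0;
	uint64_t desired = 0x1122334455667788ULL;

	/* A misaligned CAS takes an alignment fault; without the fixup the
	 * process typically dies with SIGBUS. */
	__atomic_compare_exchange_n(p, &expected, desired, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

	printf("value after CAS: 0x%llx\n", (unsigned long long)*p);
	return 0;
}

With the patch applied, dmesg should show the "Alignment fixup" and "Atomic CAS not being done atomically" messages from the handlers above.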
