@@ -744,4 +744,61 @@ l0_%=: r0 = 0; \
744
744
" ::: __clobber_all );
745
745
}
746
746
747
+ SEC ("socket" )
748
+ __description ("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr" )
749
+ __success __success_unpriv __retval (0 )
750
+ #ifdef SPEC_V1
751
+ __xlated_unpriv ("if r0 != 0x1 goto pc+2" )
752
+ /* This nospec prevents the exploit because it forces the mispredicted (not
753
+ * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer.
754
+ * This causes the CPU to realize that `r6 = r9` should have never executed. It
755
+ * ensures that r6 always contains a readable stack slot ptr when the insn after
756
+ * the nospec executes.
757
+ */
758
+ __xlated_unpriv ("nospec" )
759
+ __xlated_unpriv ("r9 = *(u8 *)(r6 +0)" )
760
+ #endif
761
+ __naked void unpriv_spec_v1_type_confusion (void )
762
+ {
763
+ asm volatile (" \
764
+ r1 = 0; \
765
+ *(u64*)(r10 - 8) = r1; \
766
+ r2 = r10; \
767
+ r2 += -8; \
768
+ r1 = %[map_hash_8b] ll; \
769
+ call %[bpf_map_lookup_elem]; \
770
+ if r0 == 0 goto l2_%=; \
771
+ /* r0: pointer to a map array entry */ \
772
+ r2 = r10; \
773
+ r2 += -8; \
774
+ r1 = %[map_hash_8b] ll; \
775
+ /* r1, r2: prepared call args */ \
776
+ r6 = r10; \
777
+ r6 += -8; \
778
+ /* r6: pointer to readable stack slot */ \
779
+ r9 = 0xffffc900; \
780
+ r9 <<= 32; \
781
+ /* r9: scalar controlled by attacker */ \
782
+ r0 = *(u64 *)(r0 + 0); /* cache miss */ \
783
+ if r0 != 0x0 goto l0_%=; \
784
+ r6 = r9; \
785
+ l0_%=: if r0 != 0x1 goto l1_%=; \
786
+ r9 = *(u8 *)(r6 + 0); \
787
+ l1_%=: /* leak r9 */ \
788
+ r9 &= 1; \
789
+ r9 <<= 9; \
790
+ *(u64*)(r10 - 8) = r9; \
791
+ call %[bpf_map_lookup_elem]; \
792
+ if r0 == 0 goto l2_%=; \
793
+ /* leak secret into is_cached(map[0|512]): */ \
794
+ r0 = *(u64 *)(r0 + 0); \
795
+ l2_%=: \
796
+ r0 = 0; \
797
+ exit; \
798
+ " :
799
+ : __imm (bpf_map_lookup_elem ),
800
+ __imm_addr (map_hash_8b )
801
+ : __clobber_all );
802
+ }
803
+
747
804
/* Required license declaration for GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
0 commit comments