; RUN: opt -passes=asan %s -S | FileCheck %s

;; Punt AddressSanitizer::instrumentMemIntrinsics out for MemIntrinsics
;; that need to write to unsupported address spaces on X86
;; (256 = gs-relative, 257 = fs-relative segment address spaces).
;; PR124238: https://www.github.com/llvm/llvm-project/issues/124238

target triple = "x86_64-unknown-linux-gnu"

;; Source buffer for the memcpy tests; aliased at @0 to mirror the
;; globals emitted by the original reproducer.
$.str.658906a285b7a0f82dabd9915e07848c = comdat any
@.str = internal constant { [2 x i8], [30 x i8] } { [2 x i8] c"x\00", [30 x i8] zeroinitializer }, comdat($.str.658906a285b7a0f82dabd9915e07848c), align 32
@0 = private alias { [2 x i8], [30 x i8] }, ptr @.str
;; memcpy into addrspaces 256/257 must be left as the intrinsic (ASan punts);
;; addrspace 258 and the default addrspace 0 are instrumented via __asan_memcpy.
define void @test_memcpy(i64 noundef %addr) sanitize_address #0 {
entry:
  %addr.addr = alloca i64, align 8
  store i64 %addr, ptr %addr.addr, align 8
  %0 = load i64, ptr %addr.addr, align 8
  %1 = inttoptr i64 %0 to ptr addrspace(257)
  call void @llvm.memcpy.p257.p0.i64(ptr addrspace(257) align 1 %1, ptr align 1 @.str, i64 1, i1 false)
; CHECK: llvm.memcpy
  %2 = load i64, ptr %addr.addr, align 8
  %3 = inttoptr i64 %2 to ptr addrspace(256)
  call void @llvm.memcpy.p256.p0.i64(ptr addrspace(256) align 1 %3, ptr align 1 @.str, i64 1, i1 false)
; CHECK: llvm.memcpy
  ;; Use the freshly loaded value %4 (the original reused stale %2, leaving %4 dead).
  %4 = load i64, ptr %addr.addr, align 8
  %5 = inttoptr i64 %4 to ptr addrspace(258)
  call void @llvm.memcpy.p258.p0.i64(ptr addrspace(258) align 1 %5, ptr align 1 @.str, i64 1, i1 false)
; CHECK: __asan_memcpy
  %6 = load i64, ptr %addr.addr, align 8
  %7 = inttoptr i64 %6 to ptr
  ;; Intrinsic mangling must match the addrspace-0 destination: p0.p0, not p258.p0.
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %7, ptr align 1 @.str, i64 1, i1 false)
; CHECK: __asan_memcpy
  ret void
}
| 35 | + |
;; memset into addrspaces 256/257 must be left as the intrinsic (ASan punts);
;; addrspace 258 and the default addrspace 0 are instrumented via __asan_memset.
define void @test_memset(i64 noundef %addr) sanitize_address #0 {
entry:
  %addr.addr = alloca i64, align 8
  store i64 %addr, ptr %addr.addr, align 8
  %0 = load i64, ptr %addr.addr, align 8
  %1 = inttoptr i64 %0 to ptr addrspace(257)
  call void @llvm.memset.p257.i64(ptr addrspace(257) align 1 %1, i8 0, i64 1, i1 false)
; CHECK: llvm.memset
  %2 = load i64, ptr %addr.addr, align 8
  %3 = inttoptr i64 %2 to ptr addrspace(256)
  call void @llvm.memset.p256.i64(ptr addrspace(256) align 1 %3, i8 0, i64 1, i1 false)
; CHECK: llvm.memset
  ;; Use the freshly loaded value %4 (the original reused stale %2, leaving %4 dead).
  %4 = load i64, ptr %addr.addr, align 8
  %5 = inttoptr i64 %4 to ptr addrspace(258)
  call void @llvm.memset.p258.i64(ptr addrspace(258) align 1 %5, i8 0, i64 1, i1 false)
; CHECK: __asan_memset
  %6 = load i64, ptr %addr.addr, align 8
  %7 = inttoptr i64 %6 to ptr
  ;; Intrinsic mangling must match the addrspace-0 destination: p0, not p258.
  call void @llvm.memset.p0.i64(ptr align 1 %7, i8 0, i64 1, i1 false)
; CHECK: __asan_memset
  ret void
}
| 58 | + |
;; memmove into addrspaces 256/257 must be left as the intrinsic (ASan punts);
;; addrspace 258 and the default addrspace 0 are instrumented via __asan_memmove.
define void @test_memmove(i64 noundef %addr) sanitize_address #0 {
entry:
  %addr.addr = alloca i64, align 8
  store i64 %addr, ptr %addr.addr, align 8
  %0 = load i64, ptr %addr.addr, align 8
  %1 = inttoptr i64 %0 to ptr addrspace(257)
  %2 = load i64, ptr %addr.addr, align 8
  %3 = inttoptr i64 %2 to ptr
  call void @llvm.memmove.p257.p0.i64(ptr addrspace(257) align 1 %1, ptr align 1 %3, i64 1, i1 false)
; CHECK: llvm.memmove
  %4 = load i64, ptr %addr.addr, align 8
  %5 = inttoptr i64 %4 to ptr addrspace(256)
  %6 = load i64, ptr %addr.addr, align 8
  %7 = inttoptr i64 %6 to ptr
  call void @llvm.memmove.p256.p0.i64(ptr addrspace(256) align 1 %5, ptr align 1 %7, i64 1, i1 false)
; CHECK: llvm.memmove
  ;; Feed the fresh loads %8/%10 into the casts (the original reused stale
  ;; %4/%6, leaving %8 and %10 dead), and mangle the intrinsic as p258.p0
  ;; to match the addrspace(258) destination.
  %8 = load i64, ptr %addr.addr, align 8
  %9 = inttoptr i64 %8 to ptr addrspace(258)
  %10 = load i64, ptr %addr.addr, align 8
  %11 = inttoptr i64 %10 to ptr
  call void @llvm.memmove.p258.p0.i64(ptr addrspace(258) align 1 %9, ptr align 1 %11, i64 1, i1 false)
; CHECK: __asan_memmove
  ;; Same fix for the addrspace(0) case: fresh loads %12/%14 and p0.p0 mangling.
  %12 = load i64, ptr %addr.addr, align 8
  %13 = inttoptr i64 %12 to ptr
  %14 = load i64, ptr %addr.addr, align 8
  %15 = inttoptr i64 %14 to ptr
  call void @llvm.memmove.p0.p0.i64(ptr align 1 %13, ptr align 1 %15, i64 1, i1 false)
; CHECK: __asan_memmove
  ret void
}