 
 target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 
+@glob = external global i32
+
 define <16 x i1> @v16i1_0() {
 ; CHECK-LABEL: @v16i1_0(
 ; CHECK-NEXT: entry:
@@ -337,6 +339,41 @@ entry:
   ret <vscale x 16 x i1> %mask
 }
 
+
+define <vscale x 16 x i1> @nxv16i1_0_constexpr() {
+; CHECK-LABEL: @nxv16i1_0_constexpr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 ptrtoint (ptr @glob to i64))
+; CHECK-NEXT: ret <vscale x 16 x i1> [[MASK]]
+;
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 ptrtoint (ptr @glob to i64))
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @nxv16i1_constexpr_0() {
+; CHECK-LABEL: @nxv16i1_constexpr_0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 ptrtoint (ptr @glob to i64), i64 0)
+; CHECK-NEXT: ret <vscale x 16 x i1> [[MASK]]
+;
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 ptrtoint (ptr @glob to i64), i64 0)
+  ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @nxv16i1_constexpr_constexpr() {
+; CHECK-LABEL: @nxv16i1_constexpr_constexpr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 ptrtoint (ptr @glob to i64), i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr @glob, i64 2) to i64))
+; CHECK-NEXT: ret <vscale x 16 x i1> [[MASK]]
+;
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 ptrtoint (ptr @glob to i64), i64 ptrtoint (ptr getelementptr inbounds nuw (i8, ptr @glob, i64 2) to i64))
+  ret <vscale x 16 x i1> %mask
+}
+
+
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
 declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)