
Commit ffecb24

fixup! [mlir][vector] Add more tests for ConvertVectorToLLVM (8/n)

Create a dedicated file for xfer tests

1 parent b260218

File tree

2 files changed: +1 addition, -377 deletions

mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir

Lines changed: 0 additions & 377 deletions
@@ -2377,383 +2377,6 @@ func.func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vecto
 
 // -----
 
-func.func @transfer_read_write_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
-  %f7 = arith.constant 7.0: f32
-  %f = vector.transfer_read %A[%base], %f7
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    memref<?xf32>, vector<17xf32>
-  vector.transfer_write %f, %A[%base]
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    vector<17xf32>, memref<?xf32>
-  return %f: vector<17xf32>
-}
-// CHECK-LABEL: func @transfer_read_write_1d
-// CHECK-SAME: %[[MEM:.*]]: memref<?xf32>,
-// CHECK-SAME: %[[BASE:.*]]: index) -> vector<17xf32>
-// CHECK: %[[C7:.*]] = arith.constant 7.0
-//
-// 1. Let dim be the memref dimension; compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM:.*]] = memref.dim %[[MEM]], %[[C0]] : memref<?xf32>
-// CHECK: %[[BOUND:.*]] = arith.subi %[[DIM]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex:.*]] = arith.constant dense
-// CHECK-SAME: <[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> :
-// CHECK-SAME: vector<17xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc:.*]] = arith.index_cast %[[BOUND]] : index to i32
-// CHECK: %[[boundVecInsert:.*]] = llvm.insertelement %[[btrunc]]
-// CHECK: %[[boundVect:.*]] = llvm.shufflevector %[[boundVecInsert]]
-// CHECK: %[[mask:.*]] = arith.cmpi slt, %[[linearIndex]], %[[boundVect]]
-// CHECK-SAME: : vector<17xi32>
-//
-// 4. Create pass-through vector.
-// CHECK: %[[PASS_THROUGH:.*]] = arith.constant dense<7.{{.*}}> : vector<17xf32>
-//
-// 5. Compute the base address via GEP.
-// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}} :
-// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
-//
-// 6. Rewrite as a masked read.
-// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %[[gep]], %[[mask]],
-// CHECK-SAME: %[[PASS_THROUGH]] {alignment = 4 : i32} :
-// CHECK-SAME: -> vector<17xf32>
-//
-// 1. Let dim be the memref dimension; compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[C0_b:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM_b:.*]] = memref.dim %[[MEM]], %[[C0_b]] : memref<?xf32>
-// CHECK: %[[BOUND_b:.*]] = arith.subi %[[DIM_b]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex_b:.*]] = arith.constant dense
-// CHECK-SAME: <[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> :
-// CHECK-SAME: vector<17xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc_b:.*]] = arith.index_cast %[[BOUND_b]] : index to i32
-// CHECK: %[[boundVecInsert_b:.*]] = llvm.insertelement %[[btrunc_b]]
-// CHECK: %[[boundVect_b:.*]] = llvm.shufflevector %[[boundVecInsert_b]]
-// CHECK: %[[mask_b:.*]] = arith.cmpi slt, %[[linearIndex_b]],
-// CHECK-SAME: %[[boundVect_b]] : vector<17xi32>
-//
-// 4. Compute the base address via GEP.
-// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
-//
-// 5. Rewrite as a masked write.
-// CHECK: llvm.intr.masked.store %[[loaded]], %[[gep_b]], %[[mask_b]]
-// CHECK-SAME: {alignment = 4 : i32} :
-// CHECK-SAME: vector<17xf32>, vector<17xi1> into !llvm.ptr
-
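The six numbered steps above are the whole fixed-width lowering recipe: bound = dim - base, mask = iota < splat(bound), then a masked load that substitutes the padding value in the off-mask lanes (and the same mask reused for the masked store). As a minimal repro sketch (hypothetical function name, 4 lanes instead of 17, fed to the same mlir-opt -convert-vector-to-llvm pipeline as the RUN line added by this commit):

func.func @xfer_repro(%mem : memref<?xf32>, %base : index) -> vector<4xf32> {
  // Padding value; the masked load substitutes it in out-of-bounds lanes.
  %pad = arith.constant 7.0 : f32
  %v = vector.transfer_read %mem[%base], %pad
      {permutation_map = affine_map<(d0) -> (d0)>} :
    memref<?xf32>, vector<4xf32>
  return %v : vector<4xf32>
}

This should produce the same dense constant index vector, arith.cmpi mask, and llvm.intr.masked.load sequence checked above, just with dense<[0, 1, 2, 3]>.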
-func.func @transfer_read_write_1d_scalable(%A : memref<?xf32>, %base: index) -> vector<[17]xf32> {
-  %f7 = arith.constant 7.0: f32
-  %f = vector.transfer_read %A[%base], %f7
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    memref<?xf32>, vector<[17]xf32>
-  vector.transfer_write %f, %A[%base]
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    vector<[17]xf32>, memref<?xf32>
-  return %f: vector<[17]xf32>
-}
-// CHECK-LABEL: func @transfer_read_write_1d_scalable
-// CHECK-SAME: %[[MEM:.*]]: memref<?xf32>,
-// CHECK-SAME: %[[BASE:.*]]: index) -> vector<[17]xf32>
-// CHECK: %[[C7:.*]] = arith.constant 7.0
-//
-// 1. Let dim be the memref dimension; compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM:.*]] = memref.dim %[[MEM]], %[[C0]] : memref<?xf32>
-// CHECK: %[[BOUND:.*]] = arith.subi %[[DIM]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex:.*]] = llvm.intr.stepvector : vector<[17]xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc:.*]] = arith.index_cast %[[BOUND]] : index to i32
-// CHECK: %[[boundVecInsert:.*]] = llvm.insertelement %[[btrunc]]
-// CHECK: %[[boundVect:.*]] = llvm.shufflevector %[[boundVecInsert]]
-// CHECK: %[[mask:.*]] = arith.cmpi slt, %[[linearIndex]], %[[boundVect]]
-// CHECK-SAME: : vector<[17]xi32>
-//
-// 4. Create pass-through vector.
-// CHECK: %[[PASS_THROUGH:.*]] = arith.constant dense<7.{{.*}}> : vector<[17]xf32>
-//
-// 5. Compute the base address via GEP.
-// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}} :
-// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
-//
-// 6. Rewrite as a masked read.
-// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %[[gep]], %[[mask]],
-// CHECK-SAME: %[[PASS_THROUGH]] {alignment = 4 : i32} :
-// CHECK-SAME: -> vector<[17]xf32>
-//
-// 1. Let dim be the memref dimension; compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[C0_b:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM_b:.*]] = memref.dim %[[MEM]], %[[C0_b]] : memref<?xf32>
-// CHECK: %[[BOUND_b:.*]] = arith.subi %[[DIM_b]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex_b:.*]] = llvm.intr.stepvector : vector<[17]xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc_b:.*]] = arith.index_cast %[[BOUND_b]] : index to i32
-// CHECK: %[[boundVecInsert_b:.*]] = llvm.insertelement %[[btrunc_b]]
-// CHECK: %[[boundVect_b:.*]] = llvm.shufflevector %[[boundVecInsert_b]]
-// CHECK: %[[mask_b:.*]] = arith.cmpi slt, %[[linearIndex_b]],
-// CHECK-SAME: %[[boundVect_b]] : vector<[17]xi32>
-//
-// 4. Compute the base address via GEP.
-// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
-//
-// 5. Rewrite as a masked write.
-// CHECK: llvm.intr.masked.store %[[loaded]], %[[gep_b]], %[[mask_b]]
-// CHECK-SAME: {alignment = 4 : i32} :
-// CHECK-SAME: vector<[17]xf32>, vector<[17]xi1> into !llvm.ptr
-
-// -----
-
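The scalable variant differs from the fixed-width one in exactly one step: the linear index vector cannot be a dense constant because the lane count is only known at runtime, so it is materialized with llvm.intr.stepvector instead. A hedged sketch of the analogous repro (hypothetical name, smaller base size):

func.func @xfer_repro_scalable(%mem : memref<?xf32>, %base : index) -> vector<[4]xf32> {
  %pad = arith.constant 7.0 : f32
  // Same lowering as the fixed-width case, but the iota comes from
  // llvm.intr.stepvector rather than an arith.constant dense<...>.
  %v = vector.transfer_read %mem[%base], %pad
      {permutation_map = affine_map<(d0) -> (d0)>} :
    memref<?xf32>, vector<[4]xf32>
  return %v : vector<[4]xf32>
}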
-func.func @transfer_read_write_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xindex> {
-  %f7 = arith.constant 7: index
-  %f = vector.transfer_read %A[%base], %f7
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    memref<?xindex>, vector<17xindex>
-  vector.transfer_write %f, %A[%base]
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    vector<17xindex>, memref<?xindex>
-  return %f: vector<17xindex>
-}
-// CHECK-LABEL: func @transfer_read_write_index_1d
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<17xindex>
-// CHECK: %[[SPLAT:.*]] = arith.constant dense<7> : vector<17xindex>
-// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[SPLAT]] : vector<17xindex> to vector<17xi64>
-
-// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} :
-// CHECK-SAME: (!llvm.ptr, vector<17xi1>, vector<17xi64>) -> vector<17xi64>
-
-// CHECK: llvm.intr.masked.store %[[loaded]], %{{.*}}, %{{.*}} {alignment = 8 : i32} :
-// CHECK-SAME: vector<17xi64>, vector<17xi1> into !llvm.ptr
-
-func.func @transfer_read_write_index_1d_scalable(%A : memref<?xindex>, %base: index) -> vector<[17]xindex> {
-  %f7 = arith.constant 7: index
-  %f = vector.transfer_read %A[%base], %f7
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    memref<?xindex>, vector<[17]xindex>
-  vector.transfer_write %f, %A[%base]
-      {permutation_map = affine_map<(d0) -> (d0)>} :
-    vector<[17]xindex>, memref<?xindex>
-  return %f: vector<[17]xindex>
-}
-// CHECK-LABEL: func @transfer_read_write_index_1d_scalable
-// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<[17]xindex>
-// CHECK: %[[SPLAT:.*]] = arith.constant dense<7> : vector<[17]xindex>
-// CHECK: %{{.*}} = builtin.unrealized_conversion_cast %[[SPLAT]] : vector<[17]xindex> to vector<[17]xi64>
-
-// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} :
-// CHECK-SAME: (!llvm.ptr, vector<[17]xi1>, vector<[17]xi64>) -> vector<[17]xi64>
-
-// CHECK: llvm.intr.masked.store %[[loaded]], %{{.*}}, %{{.*}} {alignment = 8 : i32} :
-// CHECK-SAME: vector<[17]xi64>, vector<[17]xi1> into !llvm.ptr
-
-// -----
-
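These two tests pin down element-type conversion rather than masking: index has no direct LLVM equivalent, so the type converter rewrites it (to i64 under the default data layout, hence alignment = 8), and a builtin.unrealized_conversion_cast bridges the unconverted values. A minimal sketch (hypothetical name, 2 lanes assumed):

func.func @xfer_repro_index(%mem : memref<?xindex>, %base : index) -> vector<2xindex> {
  %pad = arith.constant 7 : index
  // Lowers to llvm.intr.masked.load on vector<2xi64> with alignment = 8.
  %v = vector.transfer_read %mem[%base], %pad : memref<?xindex>, vector<2xindex>
  return %v : vector<2xindex>
}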
-func.func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<17xf32> {
-  %f7 = arith.constant 7.0: f32
-  %f = vector.transfer_read %A[%base0, %base1], %f7
-      {permutation_map = affine_map<(d0, d1) -> (d1)>} :
-    memref<?x?xf32>, vector<17xf32>
-  return %f: vector<17xf32>
-}
-// CHECK-LABEL: func @transfer_read_2d_to_1d
-// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: index, %[[BASE_1:[a-zA-Z0-9]*]]: index) -> vector<17xf32>
-// CHECK: %[[c1:.*]] = arith.constant 1 : index
-// CHECK: %[[DIM:.*]] = memref.dim %{{.*}}, %[[c1]] : memref<?x?xf32>
-//
-// Compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[BOUND:.*]] = arith.subi %[[DIM]], %[[BASE_1]] : index
-//
-// Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex:.*]] = arith.constant dense
-// CHECK-SAME: <[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> :
-// CHECK-SAME: vector<17xi32>
-//
-// Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc:.*]] = arith.index_cast %[[BOUND]] : index to i32
-// CHECK: %[[boundVecInsert:.*]] = llvm.insertelement %[[btrunc]]
-// CHECK: %[[boundVect:.*]] = llvm.shufflevector %[[boundVecInsert]]
-// CHECK: %[[mask:.*]] = arith.cmpi slt, %[[linearIndex]], %[[boundVect]]
-
-func.func @transfer_read_2d_to_1d_scalable(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<[17]xf32> {
-  %f7 = arith.constant 7.0: f32
-  %f = vector.transfer_read %A[%base0, %base1], %f7
-      {permutation_map = affine_map<(d0, d1) -> (d1)>} :
-    memref<?x?xf32>, vector<[17]xf32>
-  return %f: vector<[17]xf32>
-}
-// CHECK-LABEL: func @transfer_read_2d_to_1d_scalable
-// CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: index, %[[BASE_1:[a-zA-Z0-9]*]]: index) -> vector<[17]xf32>
-// CHECK: %[[c1:.*]] = arith.constant 1 : index
-// CHECK: %[[DIM:.*]] = memref.dim %{{.*}}, %[[c1]] : memref<?x?xf32>
-//
-// Compute the in-bounds upper bound (dim - offset).
-// CHECK: %[[BOUND:.*]] = arith.subi %[[DIM]], %[[BASE_1]] : index
-//
-// Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex:.*]] = llvm.intr.stepvector : vector<[17]xi32>
-//
-// Create bound vector to compute in-bound mask:
-//    [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc:.*]] = arith.index_cast %[[BOUND]] : index to i32
-// CHECK: %[[boundVecInsert:.*]] = llvm.insertelement %[[btrunc]]
-// CHECK: %[[boundVect:.*]] = llvm.shufflevector %[[boundVecInsert]]
-// CHECK: %[[mask:.*]] = arith.cmpi slt, %[[linearIndex]], %[[boundVect]]
-
-// -----
-
2620-
func.func @transfer_read_write_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -> vector<17xf32> {
2621-
%f7 = arith.constant 7.0: f32
2622-
%f = vector.transfer_read %A[%base], %f7
2623-
{permutation_map = affine_map<(d0) -> (d0)>} :
2624-
memref<?xf32, 3>, vector<17xf32>
2625-
vector.transfer_write %f, %A[%base]
2626-
{permutation_map = affine_map<(d0) -> (d0)>} :
2627-
vector<17xf32>, memref<?xf32, 3>
2628-
return %f: vector<17xf32>
2629-
}
2630-
// CHECK-LABEL: func @transfer_read_write_1d_non_zero_addrspace
2631-
// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<17xf32>
2632-
//
2633-
// 1. Check address space for GEP is correct.
2634-
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
2635-
// CHECK-SAME: (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f32
2636-
//
2637-
// 2. Check address space of the memref is correct.
2638-
// CHECK: %[[c0:.*]] = arith.constant 0 : index
2639-
// CHECK: %[[DIM:.*]] = memref.dim %{{.*}}, %[[c0]] : memref<?xf32, 3>
2640-
//
2641-
// 3. Check address space for GEP is correct.
2642-
// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
2643-
// CHECK-SAME: (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f32
2644-
2645-
func.func @transfer_read_write_1d_non_zero_addrspace_scalable(%A : memref<?xf32, 3>, %base: index) -> vector<[17]xf32> {
2646-
%f7 = arith.constant 7.0: f32
2647-
%f = vector.transfer_read %A[%base], %f7
2648-
{permutation_map = affine_map<(d0) -> (d0)>} :
2649-
memref<?xf32, 3>, vector<[17]xf32>
2650-
vector.transfer_write %f, %A[%base]
2651-
{permutation_map = affine_map<(d0) -> (d0)>} :
2652-
vector<[17]xf32>, memref<?xf32, 3>
2653-
return %f: vector<[17]xf32>
2654-
}
2655-
// CHECK-LABEL: func @transfer_read_write_1d_non_zero_addrspace_scalable
2656-
// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<[17]xf32>
2657-
//
2658-
// 1. Check address space for GEP is correct.
2659-
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
2660-
// CHECK-SAME: (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f32
2661-
//
2662-
// 2. Check address space of the memref is correct.
2663-
// CHECK: %[[c0:.*]] = arith.constant 0 : index
2664-
// CHECK: %[[DIM:.*]] = memref.dim %{{.*}}, %[[c0]] : memref<?xf32, 3>
2665-
//
2666-
// 3. Check address space for GEP is correct.
2667-
// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
2668-
// CHECK-SAME: (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f32
2669-
2670-
// -----
2671-
2672-
func.func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
2673-
%f7 = arith.constant 7.0: f32
2674-
%f = vector.transfer_read %A[%base], %f7 {in_bounds = [true]} :
2675-
memref<?xf32>, vector<17xf32>
2676-
return %f: vector<17xf32>
2677-
}
2678-
// CHECK-LABEL: func @transfer_read_1d_inbounds
2679-
// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<17xf32>
2680-
//
2681-
// 1. Bitcast to vector form.
2682-
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
2683-
// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
2684-
//
2685-
// 2. Rewrite as a load.
2686-
// CHECK: %[[loaded:.*]] = llvm.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<17xf32>
2687-
2688-
func.func @transfer_read_1d_inbounds_scalable(%A : memref<?xf32>, %base: index) -> vector<[17]xf32> {
2689-
%f7 = arith.constant 7.0: f32
2690-
%f = vector.transfer_read %A[%base], %f7 {in_bounds = [true]} :
2691-
memref<?xf32>, vector<[17]xf32>
2692-
return %f: vector<[17]xf32>
2693-
}
2694-
// CHECK-LABEL: func @transfer_read_1d_inbounds_scalable
2695-
// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<[17]xf32>
2696-
//
2697-
// 1. Bitcast to vector form.
2698-
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
2699-
// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
2700-
//
2701-
// 2. Rewrite as a load.
2702-
// CHECK: %[[loaded:.*]] = llvm.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<[17]xf32>
2703-
2704-
// -----
2705-
2706-
// CHECK-LABEL: func @transfer_read_write_1d_mask
2707-
// CHECK: %[[mask1:.*]] = arith.constant dense<[false, false, true, false, true]>
2708-
// CHECK: %[[cmpi:.*]] = arith.cmpi slt
2709-
// CHECK: %[[mask2:.*]] = arith.andi %[[cmpi]], %[[mask1]]
2710-
// CHECK: %[[r:.*]] = llvm.intr.masked.load %{{.*}}, %[[mask2]]
2711-
// CHECK: %[[cmpi_1:.*]] = arith.cmpi slt
2712-
// CHECK: %[[mask3:.*]] = arith.andi %[[cmpi_1]], %[[mask1]]
2713-
// CHECK: llvm.intr.masked.store %[[r]], %{{.*}}, %[[mask3]]
2714-
// CHECK: return %[[r]]
2715-
func.func @transfer_read_write_1d_mask(%A : memref<?xf32>, %base : index) -> vector<5xf32> {
2716-
%m = arith.constant dense<[0, 0, 1, 0, 1]> : vector<5xi1>
2717-
%f7 = arith.constant 7.0: f32
2718-
%f = vector.transfer_read %A[%base], %f7, %m : memref<?xf32>, vector<5xf32>
2719-
vector.transfer_write %f, %A[%base], %m : vector<5xf32>, memref<?xf32>
2720-
return %f: vector<5xf32>
2721-
}
2722-
2723-
// CHECK-LABEL: func @transfer_read_write_1d_mask_scalable
2724-
// CHECK-SAME: %[[mask:[a-zA-Z0-9]*]]: vector<[5]xi1>
2725-
// CHECK: %[[cmpi:.*]] = arith.cmpi slt
2726-
// CHECK: %[[mask1:.*]] = arith.andi %[[cmpi]], %[[mask]]
2727-
// CHECK: %[[r:.*]] = llvm.intr.masked.load %{{.*}}, %[[mask1]]
2728-
// CHECK: %[[cmpi_1:.*]] = arith.cmpi slt
2729-
// CHECK: %[[mask2:.*]] = arith.andi %[[cmpi_1]], %[[mask]]
2730-
// CHECK: llvm.intr.masked.store %[[r]], %{{.*}}, %[[mask2]]
2731-
// CHECK: return %[[r]]
2732-
func.func @transfer_read_write_1d_mask_scalable(%A : memref<?xf32>, %base : index, %m : vector<[5]xi1>) -> vector<[5]xf32> {
2733-
%f7 = arith.constant 7.0: f32
2734-
%f = vector.transfer_read %A[%base], %f7, %m : memref<?xf32>, vector<[5]xf32>
2735-
vector.transfer_write %f, %A[%base], %m : vector<[5]xf32>, memref<?xf32>
2736-
return %f: vector<[5]xf32>
2737-
}
2738-
2739-
// -----
2740-
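When the transfer carries an explicit mask operand, the lowering does not replace the bounds mask; it computes it as usual and combines it with the user-supplied mask via arith.andi before the masked load/store, as the mask2/mask3 CHECKs above show. A hedged sketch (hypothetical name):

func.func @xfer_repro_mask(%mem : memref<?xf32>, %base : index, %m : vector<4xi1>) -> vector<4xf32> {
  %pad = arith.constant 7.0 : f32
  // Effective mask = (iota < bound) AND %m.
  %v = vector.transfer_read %mem[%base], %pad, %m : memref<?xf32>, vector<4xf32>
  return %v : vector<4xf32>
}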
-// Can't lower xfer_read/xfer_write on tensors, but this shouldn't crash.
-
-// CHECK-LABEL: func @transfer_read_write_tensor
-// CHECK: vector.transfer_read
-// CHECK: vector.transfer_write
-func.func @transfer_read_write_tensor(%A: tensor<?xf32>, %base : index) -> vector<4xf32> {
-  %f7 = arith.constant 7.0: f32
-  %c0 = arith.constant 0: index
-  %f = vector.transfer_read %A[%base], %f7 : tensor<?xf32>, vector<4xf32>
-  %w = vector.transfer_write %f, %A[%c0] : vector<4xf32>, tensor<?xf32>
-  "test.some_use"(%w) : (tensor<?xf32>) -> ()
-  return %f : vector<4xf32>
-}
-
-// -----
-
 func.func @genbool_0d_f() -> vector<i1> {
   %0 = vector.constant_mask [0] : vector<i1>
   return %0 : vector<i1>
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
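After this fixup, the dedicated file holds only the RUN line above; per the commit message, it is meant to receive the xfer (vector.transfer_read/vector.transfer_write) tests. A representative entry of the kind this file is intended to hold (hypothetical sketch, not part of this diff):

// CHECK-LABEL: func @transfer_read_1d
//       CHECK: llvm.intr.masked.load
func.func @transfer_read_1d(%mem : memref<?xf32>, %base : index) -> vector<4xf32> {
  %pad = arith.constant 0.0 : f32
  %v = vector.transfer_read %mem[%base], %pad : memref<?xf32>, vector<4xf32>
  return %v : vector<4xf32>
}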
