@@ -43,9 +43,9 @@ __m128i test_mm_roti_epi8(__m128i a) {
 
 __m128i test_mm_roti_epi16(__m128i a) {
   // CIR-LABEL: test_mm_roti_epi16
-  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !{{[us]}}8i -> !u16i
-  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !{{[us]}}16i, !cir.vector<8 x !{{[us]}}16i>
-  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<8 x !{{[su]}}16i>, !cir.vector<8 x !{{[su]}}16i>, !cir.vector<8 x !{{[su]}}16i>) -> !cir.vector<8 x !{{[su]}}16i>
+  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !u8i -> !u16i
+  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !{{[us]}}16i, !cir.vector<8 x !u16i>
+  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<8 x !{{[su]}}16i>, !cir.vector<8 x !{{[su]}}16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !{{[su]}}16i>
   // LLVM-LABEL: test_mm_roti_epi16
   // LLVM: %[[CASTED_VAR:.*]] = bitcast <2 x i64> {{%.*}} to <8 x i16>
   // LLVM: {{%.*}} = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %[[CASTED_VAR]], <8 x i16> %[[CASTED_VAR]], <8 x i16> splat (i16 50))
@@ -58,17 +58,23 @@ __m128i test_mm_roti_epi16(__m128i a) {
 //NOTE: This only works as I expect for CIR but not for LLVMIR
 __m128i test_mm_roti_epi32(__m128i a) {
   // CIR-LABEL: test_mm_roti_epi32
-  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !{{[us]}}8i -> !u32i
-  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !{{[us]}}32i, !cir.vector<4 x !{{[us]}}32i>
-  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<4 x !{{[su]}}32i>, !cir.vector<4 x !{{[su]}}32i>, !cir.vector<4 x !{{[su]}}32i>) -> !cir.vector<4 x !{{[su]}}32i>
+  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !u8i -> !u32i
+  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !{{[us]}}32i, !cir.vector<4 x !u32i>
+  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<4 x !{{[su]}}32i>, !cir.vector<4 x !{{[su]}}32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !{{[su]}}32i>
+  // LLVM-LABEL: test_mm_roti_epi32
+  // LLVM: %[[CASTED_VAR:.*]] = bitcast <2 x i64> {{%.*}} to <4 x i32>
+  // LLVM: {{%.*}} = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %[[CASTED_VAR]], <4 x i32> %[[CASTED_VAR]], <4 x i32> splat (i32 226))
+  // OGCG-LABEL: test_mm_roti_epi32
+  // OGCG: %[[CASTED_VAR:.*]] = bitcast <2 x i64> {{%.*}} to <4 x i32>
+  // OGCG: {{%.*}} = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %[[CASTED_VAR]], <4 x i32> %[[CASTED_VAR]], <4 x i32> splat (i32 226))
   return _mm_roti_epi32(a, -30);
 }
 
 __m128i test_mm_roti_epi64(__m128i a) {
   // CIR-LABEL: test_mm_roti_epi64
-  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !{{[us]}}8i -> !u64i
-  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !{{.}}64i, !cir.vector<2 x !{{[us]}}64i>
-  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<2 x !{{[su]}}64i>, !cir.vector<2 x !{{[su]}}64i>, !cir.vector<2 x !u64i>) -> !cir.vector<2 x !{{[su]}}64i>
+  // CIR: {{%.*}} = cir.cast integral {{%.*}} : !u8i -> !u64i
+  // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<2 x !u64i>
+  // CIR: {{%.*}} = cir.call_llvm_intrinsic "fshl" {{.*}} : (!cir.vector<2 x !{{[su]}}64i>, !cir.vector<2 x !{{[su]}}64i>, !cir.vector<2 x !u64i>) -> !cir.vector<2 x !s64i>
   // LLVM-LABEL: test_mm_roti_epi64
   // LLVM: %[[VAR:.*]] = load <2 x i64>, ptr {{%.*}}, align 16
   // LLVM: {{%.*}} = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %[[VAR]], <2 x i64> %[[VAR]], <2 x i64> splat (i64 100))
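
A minimal scalar sketch, not part of the patch, of why the epi32 checks expect an fshl count of splat (i32 226): the rotate count passes through an 8-bit value (the zero-extending cir.cast integral !u8i -> !u32i above), so -30 becomes 226, and llvm.fshl(x, x, n) rotates left by n reduced modulo the bit width, so 226 and -30 agree modulo 32. The helper name rotl32 below is only for illustration and does not appear in the patch.

// Sketch only, assuming the documented semantics of llvm.fshl(x, x, n):
// a rotate-left whose count is taken modulo the element bit width.
#include <assert.h>
#include <stdint.h>

// rotl32 models llvm.fshl.i32(x, x, n): rotate x left by n % 32.
static uint32_t rotl32(uint32_t x, uint32_t n) {
  n &= 31;                                   // fshl reduces the count mod 32
  return n == 0 ? x : (x << n) | (x >> (32 - n));
}

int main(void) {
  uint32_t x = 0x80000001u;
  uint8_t imm = (uint8_t)-30;                // -30 through a u8 immediate is 226
  assert(imm == 226);
  // 226 % 32 == 2 == (unsigned)-30 % 32, so both rotate by the same amount.
  assert(rotl32(x, imm) == rotl32(x, (uint32_t)-30));
  return 0;
}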