@@ -1295,4 +1295,30 @@ void foo23() {
 // OGCG: %[[NE_B_ZERO:.*]] = icmp ne <4 x i32> %[[TMP_B]], zeroinitializer
 // OGCG: %[[VEC_OR:.*]] = and <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]]
 // OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32>
-// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16
+// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16
+
+void logical_not() {
+  vi4 a;
+  vi4 b = !a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["b", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !s32i>
+// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16
+// LLVM: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer
+// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32>
+// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16
+
+// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16
+// OGCG: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer
+// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32>
+// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16
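For reference, the new checks encode the vector-extension semantics of unary `!`: each lane is compared against zero and the i1 result is sign-extended back to i32, so a zero lane yields -1 (all ones) and a nonzero lane yields 0. CIR keeps this as a single `cir.vec.cmp(eq, ...)` whose result is already a `!cir.vector<4 x !s32i>`, while the LLVM and OGCG output spell it as `icmp eq` followed by `sext`. A minimal standalone sketch of the expected element-wise behaviour, assuming `vi4` is the four-int vector typedef used elsewhere in this test file and using a hypothetical helper name:

// Sketch only: mirrors what the checks above expect; not part of the test.
typedef int vi4 __attribute__((vector_size(16))); // assumed to match the file's vi4

vi4 reference_logical_not(vi4 a) {   // hypothetical helper for illustration
  vi4 r;
  for (int i = 0; i < 4; ++i)
    r[i] = (a[i] == 0) ? -1 : 0;     // zero lane -> -1, nonzero lane -> 0
  return r;
}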