diff --git a/clang/include/clang/Basic/BuiltinsRISCV.td b/clang/include/clang/Basic/BuiltinsRISCV.td index 4cc89a8a9d8af..3263603a8a1cf 100644 --- a/clang/include/clang/Basic/BuiltinsRISCV.td +++ b/clang/include/clang/Basic/BuiltinsRISCV.td @@ -146,3 +146,8 @@ let Features = "zihintntl", Attributes = [CustomTypeChecking] in { def ntl_load : RISCVBuiltin<"void(...)">; def ntl_store : RISCVBuiltin<"void(...)">; } // Features = "zihintntl", Attributes = [CustomTypeChecking] + +//===----------------------------------------------------------------------===// +// XCV extensions. +//===----------------------------------------------------------------------===// +include "clang/Basic/BuiltinsRISCVXCV.td" diff --git a/clang/include/clang/Basic/BuiltinsRISCVXCV.td b/clang/include/clang/Basic/BuiltinsRISCVXCV.td new file mode 100644 index 0000000000000..06ce07ade5c12 --- /dev/null +++ b/clang/include/clang/Basic/BuiltinsRISCVXCV.td @@ -0,0 +1,41 @@ +//==- BuiltinsRISCVXCV.td - RISC-V CORE-V Builtin database ----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the CORE-V-specific builtin function database. Users of +// this file must define the BUILTIN macro to make use of this information. +// +//===----------------------------------------------------------------------===// + +class RISCXCVBuiltin<string prototype, string features = ""> : TargetBuiltin { + let Spellings = ["__builtin_riscv_cv_" # NAME]; + let Prototype = prototype; + let Features = features; +} + +let Attributes = [NoThrow, Const] in { +//===----------------------------------------------------------------------===// +// XCValu extension. 
+//===----------------------------------------------------------------------===// +def alu_slet : RISCXCVBuiltin<"int(int, int)", "xcvalu">; +def alu_sletu : RISCXCVBuiltin<"int(unsigned int, unsigned int)", "xcvalu">; +def alu_exths : RISCXCVBuiltin<"int(int)", "xcvalu">; +def alu_exthz : RISCXCVBuiltin<"unsigned int(unsigned int)", "xcvalu">; +def alu_extbs : RISCXCVBuiltin<"int(int)", "xcvalu">; +def alu_extbz : RISCXCVBuiltin<"unsigned int(unsigned int)", "xcvalu">; + +def alu_clip : RISCXCVBuiltin<"int(int, int)", "xcvalu">; +def alu_clipu : RISCXCVBuiltin<"unsigned int(unsigned int, unsigned int)", "xcvalu">; +def alu_addN : RISCXCVBuiltin<"int(int, int, unsigned int)", "xcvalu">; +def alu_adduN : RISCXCVBuiltin<"unsigned int(unsigned int, unsigned int, unsigned int)", "xcvalu">; +def alu_addRN : RISCXCVBuiltin<"int(int, int, unsigned int)", "xcvalu">; +def alu_adduRN : RISCXCVBuiltin<"unsigned int(unsigned int, unsigned int, unsigned int)", "xcvalu">; +def alu_subN : RISCXCVBuiltin<"int(int, int, unsigned int)", "xcvalu">; +def alu_subuN : RISCXCVBuiltin<"unsigned int(unsigned int, unsigned int, unsigned int)", "xcvalu">; +def alu_subRN : RISCXCVBuiltin<"int(int, int, unsigned int)", "xcvalu">; +def alu_subuRN : RISCXCVBuiltin<"unsigned int(unsigned int, unsigned int, unsigned int)", "xcvalu">; +} // Attributes = [NoThrow, Const] diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index d739597de4c85..da3eca73bfb57 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -22340,10 +22340,60 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, return Store; } + // XCValu + case RISCV::BI__builtin_riscv_cv_alu_addN: + ID = Intrinsic::riscv_cv_alu_addN; + break; + case RISCV::BI__builtin_riscv_cv_alu_addRN: + ID = Intrinsic::riscv_cv_alu_addRN; + break; + case RISCV::BI__builtin_riscv_cv_alu_adduN: + ID = Intrinsic::riscv_cv_alu_adduN; + break; + case RISCV::BI__builtin_riscv_cv_alu_adduRN: + ID = Intrinsic::riscv_cv_alu_adduRN; + break; + case RISCV::BI__builtin_riscv_cv_alu_clip: + ID = Intrinsic::riscv_cv_alu_clip; + break; + case RISCV::BI__builtin_riscv_cv_alu_clipu: + ID = Intrinsic::riscv_cv_alu_clipu; + break; + case RISCV::BI__builtin_riscv_cv_alu_extbs: + return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty, + "extbs"); + case RISCV::BI__builtin_riscv_cv_alu_extbz: + return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int8Ty), Int32Ty, + "extbz"); + case RISCV::BI__builtin_riscv_cv_alu_exths: + return Builder.CreateSExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty, + "exths"); + case RISCV::BI__builtin_riscv_cv_alu_exthz: + return Builder.CreateZExt(Builder.CreateTrunc(Ops[0], Int16Ty), Int32Ty, + "exthz"); + case RISCV::BI__builtin_riscv_cv_alu_slet: + return Builder.CreateZExt(Builder.CreateICmpSLE(Ops[0], Ops[1]), Int32Ty, + "sle"); + case RISCV::BI__builtin_riscv_cv_alu_sletu: + return Builder.CreateZExt(Builder.CreateICmpULE(Ops[0], Ops[1]), Int32Ty, + "sleu"); + case RISCV::BI__builtin_riscv_cv_alu_subN: + ID = Intrinsic::riscv_cv_alu_subN; + break; + case RISCV::BI__builtin_riscv_cv_alu_subRN: + ID = Intrinsic::riscv_cv_alu_subRN; + break; + case RISCV::BI__builtin_riscv_cv_alu_subuN: + ID = Intrinsic::riscv_cv_alu_subuN; + break; + case RISCV::BI__builtin_riscv_cv_alu_subuRN: + ID = Intrinsic::riscv_cv_alu_subuRN; + break; - // Vector builtins are handled from here. + // Vector builtins are handled from here. 
#include "clang/Basic/riscv_vector_builtin_cg.inc" - // SiFive Vector builtins are handled from here. + + // SiFive Vector builtins are handled from here. #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc" } diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index f5cc07c303f9e..ff392e7122a44 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -120,6 +120,7 @@ set(ppc_htm_files set(riscv_files riscv_bitmanip.h + riscv_corev_alu.h riscv_crypto.h riscv_ntlh.h sifive_vector.h diff --git a/clang/lib/Headers/riscv_corev_alu.h b/clang/lib/Headers/riscv_corev_alu.h new file mode 100644 index 0000000000000..d2832ddf72efe --- /dev/null +++ b/clang/lib/Headers/riscv_corev_alu.h @@ -0,0 +1,128 @@ +/*===---- riscv_corev_alu.h - CORE-V ALU intrinsics ------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __RISCV_COREV_ALU_H +#define __RISCV_COREV_ALU_H + +#include <stdint.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__riscv_xcvalu) + +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_abs(long a) { + return __builtin_abs(a); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_slet(long a, long b) { + return __builtin_riscv_cv_alu_slet(a, b); +} + +static __inline__ long __DEFAULT_FN_ATTRS +__riscv_cv_alu_sletu(unsigned long a, unsigned long b) { + return __builtin_riscv_cv_alu_sletu(a, b); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_min(long a, long b) { + return __builtin_elementwise_min(a, b); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_minu(unsigned long a, unsigned long b) { + return __builtin_elementwise_min(a, b); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_max(long a, long b) { + return __builtin_elementwise_max(a, b); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_maxu(unsigned long a, unsigned long b) { + return __builtin_elementwise_max(a, b); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_exths(int16_t a) { + return __builtin_riscv_cv_alu_exths(a); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_exthz(uint16_t a) { + return __builtin_riscv_cv_alu_exthz(a); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_extbs(int8_t a) { + return __builtin_riscv_cv_alu_extbs(a); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_extbz(uint8_t a) { + return __builtin_riscv_cv_alu_extbz(a); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_clip(long a, + unsigned long b) { + return __builtin_riscv_cv_alu_clip(a, b); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_clipu(unsigned long a, unsigned long b) { + return __builtin_riscv_cv_alu_clipu(a, b); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_addN(long a, long b, + uint8_t shft) { + return __builtin_riscv_cv_alu_addN(a, b, shft); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_adduN(unsigned long a, unsigned long b, uint8_t shft) { + return __builtin_riscv_cv_alu_adduN(a, b, shft); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_addRN(long a, long 
b, + uint8_t shft) { + return __builtin_riscv_cv_alu_addRN(a, b, shft); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_adduRN(unsigned long a, unsigned long b, uint8_t shft) { + return __builtin_riscv_cv_alu_adduRN(a, b, shft); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_subN(long a, long b, + uint8_t shft) { + return __builtin_riscv_cv_alu_subN(a, b, shft); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_subuN(unsigned long a, unsigned long b, uint8_t shft) { + return __builtin_riscv_cv_alu_subuN(a, b, shft); +} + +static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_subRN(long a, long b, + uint8_t shft) { + return __builtin_riscv_cv_alu_subRN(a, b, shft); +} + +static __inline__ unsigned long __DEFAULT_FN_ATTRS +__riscv_cv_alu_subuRN(unsigned long a, unsigned long b, uint8_t shft) { + return __builtin_riscv_cv_alu_subuRN(a, b, shft); +} + +#endif // defined(__riscv_xcvalu) + +#if defined(__cplusplus) +} +#endif + +#endif // define __RISCV_COREV_ALU_H diff --git a/clang/test/CodeGen/RISCV/riscv-xcvalu-c-api.c b/clang/test/CodeGen/RISCV/riscv-xcvalu-c-api.c new file mode 100644 index 0000000000000..b4690a5f1c1ca --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-xcvalu-c-api.c @@ -0,0 +1,434 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +xcvalu -emit-llvm %s -o - \ +// RUN: | FileCheck %s + +#include <stdint.h> +#include <riscv_corev_alu.h> + +// CHECK-LABEL: @test_alu_slet( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK-NEXT: [[SLE_I:%.*]] = zext i1 [[TMP4]] to i32 +// CHECK-NEXT: ret i32 [[SLE_I]] +// +int test_alu_slet(int32_t a, int32_t b) { + return __riscv_cv_alu_slet(a, b); +} + +// CHECK-LABEL: @test_alu_sletu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = icmp ule i32 [[TMP2]], [[TMP3]] +// CHECK-NEXT: [[SLEU_I:%.*]] = zext i1 [[TMP4]] to i32 +// CHECK-NEXT: ret i32 [[SLEU_I]] +// +int test_alu_sletu(uint32_t a, uint32_t b) { + return __riscv_cv_alu_sletu(a, b); +} + +// 
CHECK-LABEL: @test_alu_min( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[ELT_MIN_I:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: ret i32 [[ELT_MIN_I]] +// +int test_alu_min(int32_t a, int32_t b) { + return __riscv_cv_alu_min(a, b); +} + +// CHECK-LABEL: @test_alu_minu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[ELT_MIN_I:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: ret i32 [[ELT_MIN_I]] +// +int test_alu_minu(uint32_t a, uint32_t b) { + return __riscv_cv_alu_minu(a, b); +} + +// CHECK-LABEL: @test_alu_max( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[ELT_MAX_I:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: ret i32 [[ELT_MAX_I]] +// +int test_alu_max(int32_t a, int32_t b) { + return __riscv_cv_alu_max(a, b); +} + +// CHECK-LABEL: @test_alu_maxu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr 
[[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[ELT_MAX_I:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: ret i32 [[ELT_MAX_I]] +// +int test_alu_maxu(uint32_t a, uint32_t b) { + return __riscv_cv_alu_maxu(a, b); +} + +// CHECK-LABEL: @test_alu_exths( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2 +// CHECK-NEXT: store i16 [[TMP0]], ptr [[A_ADDR_I]], align 2 +// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[A_ADDR_I]], align 2 +// CHECK-NEXT: [[CONV_I:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK-NEXT: [[EXTHS_I:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTHS_I]] +// +int test_alu_exths(int16_t a) { + return __riscv_cv_alu_exths(a); +} + +// CHECK-LABEL: @test_alu_exthz( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2 +// CHECK-NEXT: store i16 [[TMP0]], ptr [[A_ADDR_I]], align 2 +// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[A_ADDR_I]], align 2 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i16 [[TMP1]] to i32 +// CHECK-NEXT: [[EXTHZ_I:%.*]] = zext i16 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTHZ_I]] +// +int test_alu_exthz(uint16_t a) { + return __riscv_cv_alu_exthz(a); +} + +// CHECK-LABEL: @test_alu_extbs( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: store i8 [[A:%.*]], ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 +// CHECK-NEXT: store i8 [[TMP0]], ptr [[A_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[A_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = sext i8 [[TMP1]] to i32 +// CHECK-NEXT: [[EXTBS_I:%.*]] = sext i8 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTBS_I]] +// +int test_alu_extbs(int8_t a) { + return __riscv_cv_alu_extbs(a); +} + +// CHECK-LABEL: @test_alu_extbz( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: store i8 [[A:%.*]], ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 +// CHECK-NEXT: store i8 [[TMP0]], ptr [[A_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[A_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP1]] to i32 +// CHECK-NEXT: [[EXTBZ_I:%.*]] = zext i8 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTBZ_I]] +// +int test_alu_extbz(uint8_t a) { + return __riscv_cv_alu_extbz(a); +} + +// CHECK-LABEL: @test_alu_clip( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 0, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// 
CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.riscv.cv.alu.clip(i32 [[TMP1]], i32 [[TMP2]]) +// CHECK-NEXT: ret i32 [[TMP3]] +// +int test_alu_clip(int32_t a) { + return __riscv_cv_alu_clip(a, 0); +} + +// CHECK-LABEL: @test_alu_clipu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 0, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.riscv.cv.alu.clipu(i32 [[TMP1]], i32 [[TMP2]]) +// CHECK-NEXT: ret i32 [[TMP3]] +// +int test_alu_clipu(uint32_t a) { + return __riscv_cv_alu_clipu(a, 0); +} + +// CHECK-LABEL: @test_alu_addN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.addN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_addN(int32_t a, int32_t b) { + return __riscv_cv_alu_addN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_adduN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.adduN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_adduN(uint32_t a, uint32_t b) { + return 
__riscv_cv_alu_adduN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_addRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.addRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_addRN(int32_t a, int32_t b) { + return __riscv_cv_alu_addRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_adduRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.adduRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_adduRN(uint32_t a, uint32_t b) { + return __riscv_cv_alu_adduRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] 
= zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_subN(int32_t a, int32_t b) { + return __riscv_cv_alu_subN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subuN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subuN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_subuN(uint32_t a, uint32_t b) { + return __riscv_cv_alu_subuN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_subRN(int32_t a, int32_t b) { + return __riscv_cv_alu_subRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subuRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR_I:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SHFT_ADDR_I:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: store i32 [[TMP1]], ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: store i8 0, ptr [[SHFT_ADDR_I]], align 1 +// 
CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[B_ADDR_I]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[SHFT_ADDR_I]], align 1 +// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP4]] to i32 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.riscv.cv.alu.subuRN(i32 [[TMP2]], i32 [[TMP3]], i32 [[CONV_I]]) +// CHECK-NEXT: ret i32 [[TMP5]] +// +int test_alu_subuRN(uint32_t a, uint32_t b) { + return __riscv_cv_alu_subuRN(a, b, 0); +} diff --git a/clang/test/CodeGen/RISCV/riscv-xcvalu.c b/clang/test/CodeGen/RISCV/riscv-xcvalu.c new file mode 100644 index 0000000000000..e4c2a2c3ca28b --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-xcvalu.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +xcvalu -emit-llvm %s -o - \ +// RUN: | FileCheck %s + +#include <stdint.h> + +// CHECK-LABEL: @test_abs( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[TMP0]], i1 true) +// CHECK-NEXT: ret i32 [[TMP1]] +// +int test_abs(int a) { + return __builtin_abs(a); +} + +// CHECK-LABEL: @test_alu_slet( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle i32 [[TMP0]], [[TMP1]] +// CHECK-NEXT: [[SLE:%.*]] = zext i1 [[TMP2]] to i32 +// CHECK-NEXT: ret i32 [[SLE]] +// +int test_alu_slet(int32_t a, int32_t b) { + return __builtin_riscv_cv_alu_slet(a, b); +} + +// CHECK-LABEL: @test_alu_sletu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = icmp ule i32 [[TMP0]], [[TMP1]] +// CHECK-NEXT: [[SLEU:%.*]] = zext i1 [[TMP2]] to i32 +// CHECK-NEXT: ret i32 [[SLEU]] +// +int test_alu_sletu(uint32_t a, uint32_t b) { + return __builtin_riscv_cv_alu_sletu(a, b); +} + +// CHECK-LABEL: @test_alu_exths( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[CONV]] to i16 +// CHECK-NEXT: [[EXTHS:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTHS]] +// +int test_alu_exths(int16_t a) { + return __builtin_riscv_cv_alu_exths(a); +} + +// CHECK-LABEL: @test_alu_exthz( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP0]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[CONV]] to i16 +// CHECK-NEXT: [[EXTHZ:%.*]] = zext 
i16 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTHZ]] +// +int test_alu_exthz(uint16_t a) { + return __builtin_riscv_cv_alu_exthz(a); +} + +// CHECK-LABEL: @test_alu_extbs( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: store i8 [[A:%.*]], ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[CONV]] to i8 +// CHECK-NEXT: [[EXTBS:%.*]] = sext i8 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTBS]] +// +int test_alu_extbs(int8_t a) { + return __builtin_riscv_cv_alu_extbs(a); +} + +// CHECK-LABEL: @test_alu_extbz( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: store i8 [[A:%.*]], ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 +// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[CONV]] to i8 +// CHECK-NEXT: [[EXTBZ:%.*]] = zext i8 [[TMP1]] to i32 +// CHECK-NEXT: ret i32 [[EXTBZ]] +// +int test_alu_extbz(uint8_t a) { + return __builtin_riscv_cv_alu_extbz(a); +} + +// CHECK-LABEL: @test_alu_clip( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clip(i32 [[TMP0]], i32 15) +// CHECK-NEXT: ret i32 [[TMP1]] +// +int test_alu_clip(int32_t a) { + return __builtin_riscv_cv_alu_clip(a, 15); +} + +// CHECK-LABEL: @test_alu_clipu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.cv.alu.clipu(i32 [[TMP0]], i32 15) +// CHECK-NEXT: ret i32 [[TMP1]] +// +int test_alu_clipu(uint32_t a) { + return __builtin_riscv_cv_alu_clipu(a, 15); +} + +// CHECK-LABEL: @test_alu_addN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_addN(int32_t a, int32_t b) { + return __builtin_riscv_cv_alu_addN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_adduN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_adduN(uint32_t a, uint32_t b) { + return __builtin_riscv_cv_alu_adduN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_addRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], 
ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.addRN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_addRN(int32_t a, int32_t b) { + return __builtin_riscv_cv_alu_addRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_adduRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.adduRN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_adduRN(uint32_t a, uint32_t b) { + return __builtin_riscv_cv_alu_adduRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_subN(int32_t a, int32_t b) { + return __builtin_riscv_cv_alu_subN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subuN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_subuN(uint32_t a, uint32_t b) { + return __builtin_riscv_cv_alu_subuN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subRN(i32 [[TMP0]], i32 [[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_subRN(int32_t a, int32_t b) { + return __builtin_riscv_cv_alu_subRN(a, b, 0); +} + +// CHECK-LABEL: @test_alu_subuRN( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.cv.alu.subuRN(i32 [[TMP0]], i32 
[[TMP1]], i32 0) +// CHECK-NEXT: ret i32 [[TMP2]] +// +int test_alu_subuRN(uint32_t a, uint32_t b) { + return __builtin_riscv_cv_alu_subuRN(a, b, 0); +} diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td index 38263f375c469..6e7e90438c621 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td @@ -61,14 +61,14 @@ let TargetPrefix = "riscv" in { def int_riscv_cv_alu_clip : ScalarCoreVAluGprGprIntrinsic; def int_riscv_cv_alu_clipu : ScalarCoreVAluGprGprIntrinsic; - def int_riscv_cv_alu_addn : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_addun : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_addrn : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_addurn : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_subn : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_subun : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_subrn : ScalarCoreVAluGprGprGprIntrinsic; - def int_riscv_cv_alu_suburn : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_addN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_adduN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_addRN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_adduRN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_subN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_subuN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_subRN : ScalarCoreVAluGprGprGprIntrinsic; + def int_riscv_cv_alu_subuRN : ScalarCoreVAluGprGprGprIntrinsic; def int_riscv_cv_mac_mac : ScalarCoreVMacGprGprGprIntrinsic; def int_riscv_cv_mac_msu : ScalarCoreVMacGprGprGprIntrinsic; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td index b586b10192fff..b54baa16d9286 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td @@ -792,17 +792,18 @@ let Predicates = [HasVendorXCValu, IsRV32], AddedComplexity = 1 in { def : Pat<(sext_inreg (XLenVT GPR:$rs1), i16), (CV_EXTHS GPR:$rs1)>; def : Pat<(sext_inreg (XLenVT GPR:$rs1), i8), (CV_EXTBS GPR:$rs1)>; def : Pat<(and (XLenVT GPR:$rs1), 0xffff), (CV_EXTHZ GPR:$rs1)>; + def : Pat<(and (XLenVT GPR:$rs1), 0xff), (CV_EXTBZ GPR:$rs1)>; defm CLIP : PatCoreVAluGprImm<int_riscv_cv_alu_clip>; defm CLIPU : PatCoreVAluGprImm<int_riscv_cv_alu_clipu>; - defm ADDN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addn>; - defm ADDUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addun>; - defm ADDRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addrn>; - defm ADDURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addurn>; - defm SUBN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subn>; - defm SUBUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subun>; - defm SUBRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subrn>; - defm SUBURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_suburn>; + defm ADDN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addN>; + defm ADDUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_adduN>; + defm ADDRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_addRN>; + defm ADDURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_adduRN>; + defm SUBN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subN>; + defm SUBUN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subuN>; + defm SUBRN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subRN>; + defm SUBURN : PatCoreVAluGprGprImm<int_riscv_cv_alu_subuRN>; } // Predicates = [HasVendorXCValu, IsRV32] //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/xcvalu.ll b/llvm/test/CodeGen/RISCV/xcvalu.ll index 1ddfa102aca71..54634383dfdac 100644 --- a/llvm/test/CodeGen/RISCV/xcvalu.ll +++ b/llvm/test/CodeGen/RISCV/xcvalu.ll @@ -91,6 +91,26 @@ define i32 @exthz(i16 %a) { ret i32 %1 } +define i32 @extbs(i8 %a) { +; CHECK-LABEL: extbs: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $x11 killed $x10 +; CHECK-NEXT: cv.extbs a0, a0 +; CHECK-NEXT: ret + %1 = sext i8 %a 
to i32 + ret i32 %1 +} + +define i32 @extbz(i8 %a) { +; CHECK-LABEL: extbz: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $x11 killed $x10 +; CHECK-NEXT: cv.extbz a0, a0 +; CHECK-NEXT: ret + %1 = zext i8 %a to i32 + ret i32 %1 +} + declare i32 @llvm.riscv.cv.alu.clip(i32, i32) define i32 @test.cv.alu.clip.case.a(i32 %a) { @@ -133,170 +153,170 @@ define i32 @test.cv.alu.clipu.case.b(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.addN(i32, i32, i32) -define i32 @test.cv.alu.addn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addn.case.a: +define i32 @test.cv.alu.addN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.addN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.addn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.addN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.addn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addn.case.b: +define i32 @test.cv.alu.addN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.addN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.addnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.addN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addun(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.adduN(i32, i32, i32) -define i32 @test.cv.alu.addun.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addun.case.a: +define i32 @test.cv.alu.adduN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.adduN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.addun a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addun(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.adduN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.addun.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addun.case.b: +define i32 @test.cv.alu.adduN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.adduN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.addunr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addun(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.adduN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addrn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.addRN(i32, i32, i32) -define i32 @test.cv.alu.addrn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addrn.case.a: +define i32 @test.cv.alu.addRN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.addRN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.addrn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addrn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.addRN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.addrn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addrn.case.b: +define i32 @test.cv.alu.addRN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.addRN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.addrnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addrn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.addRN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addurn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.adduRN(i32, i32, i32) -define i32 @test.cv.alu.addurn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addurn.case.a: +define i32 @test.cv.alu.adduRN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: 
test.cv.alu.adduRN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.addurn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addurn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.adduRN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.addurn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.addurn.case.b: +define i32 @test.cv.alu.adduRN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.adduRN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.addurnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.addurn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.adduRN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.subN(i32, i32, i32) -define i32 @test.cv.alu.subn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subn.case.a: +define i32 @test.cv.alu.subN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.subn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.subN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.subn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subn.case.b: +define i32 @test.cv.alu.subN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.subnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.subN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subun(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.subuN(i32, i32, i32) -define i32 @test.cv.alu.subun.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subun.case.a: +define i32 @test.cv.alu.subuN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subuN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.subun a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subun(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.subuN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.subun.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subun.case.b: +define i32 @test.cv.alu.subuN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subuN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.subunr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subun(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.subuN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subrn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.subRN(i32, i32, i32) -define i32 @test.cv.alu.subrn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subrn.case.a: +define i32 @test.cv.alu.subRN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subRN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.subrn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subrn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.subRN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.subrn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.subrn.case.b: +define i32 @test.cv.alu.subRN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subRN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.subrnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.subrn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.subRN(i32 %a, i32 %b, i32 32) ret i32 %1 } -declare i32 
@llvm.riscv.cv.alu.suburn(i32, i32, i32) +declare i32 @llvm.riscv.cv.alu.subuRN(i32, i32, i32) -define i32 @test.cv.alu.suburn.case.a(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.suburn.case.a: +define i32 @test.cv.alu.subuRN.case.a(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subuRN.case.a: ; CHECK: # %bb.0: ; CHECK-NEXT: cv.suburn a0, a0, a1, 15 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.suburn(i32 %a, i32 %b, i32 15) + %1 = call i32 @llvm.riscv.cv.alu.subuRN(i32 %a, i32 %b, i32 15) ret i32 %1 } -define i32 @test.cv.alu.suburn.case.b(i32 %a, i32 %b) { -; CHECK-LABEL: test.cv.alu.suburn.case.b: +define i32 @test.cv.alu.subuRN.case.b(i32 %a, i32 %b) { +; CHECK-LABEL: test.cv.alu.subuRN.case.b: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: cv.suburnr a0, a1, a2 ; CHECK-NEXT: ret - %1 = call i32 @llvm.riscv.cv.alu.suburn(i32 %a, i32 %b, i32 32) + %1 = call i32 @llvm.riscv.cv.alu.subuRN(i32 %a, i32 %b, i32 32) ret i32 %1 }
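
Illustrative usage, not part of the patch: a minimal sketch of how the new riscv_corev_alu.h wrappers might be called from user code. The wrapper names and their lowering follow the header, CGBuiltin.cpp, and RISCVInstrInfoXCV.td changes above; the build line, file name, and function names below are assumptions for illustration only.

// example.c -- assumed build line, e.g.:
//   clang --target=riscv32 -march=rv32imc_xcvalu -O2 -S example.c
#include <stdint.h>
#include <riscv_corev_alu.h>

// Signed "set if less than or equal": per the CGBuiltin.cpp lowering above,
// this becomes icmp sle + zext, i.e. it returns 1 when a <= b and 0 otherwise.
long le_flag(long a, long b) { return __riscv_cv_alu_slet(a, b); }

// Sign-extend a 16-bit sample to XLEN; selected as cv.exths via the
// sext_inreg pattern in RISCVInstrInfoXCV.td.
long widen_sample(int16_t s) { return __riscv_cv_alu_exths(s); }

// Add-and-shift: maps onto llvm.riscv.cv.alu.addN and, for a shift amount
// that fits the immediate form, onto cv.addn (see xcvalu.ll above).
long add_shift1(long a, long b) { return __riscv_cv_alu_addN(a, b, 1); }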