
Conversation

@LiqinWeng
Contributor

No description provided.

@llvmbot added the llvm:analysis label (Includes value tracking, cost tables and constant folding) on Nov 1, 2024
@llvmbot
Member

llvmbot commented Nov 1, 2024

@llvm/pr-subscribers-llvm-analysis

Author: LiqinWeng (LiqinWeng)

Changes

Patch is 288.04 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/114504.diff

7 Files Affected:

  • (modified) llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll (+83-83)
  • (modified) llvm/test/Analysis/CostModel/RISCV/fp-sqrt-pow.ll (+36-36)
  • (modified) llvm/test/Analysis/CostModel/RISCV/fp-trig-log-exp.ll (+126-126)
  • (modified) llvm/test/Analysis/CostModel/RISCV/fround.ll (+304-304)
  • (modified) llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll (+284-284)
  • (modified) llvm/test/Analysis/CostModel/RISCV/int-min-max.ll (+152-152)
  • (modified) llvm/test/Analysis/CostModel/RISCV/int-sat-math.ll (+180-180)
diff --git a/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
index 6e4061a42bf9b8..0b2c8da4438da2 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll
@@ -30,20 +30,20 @@ define void @fabs() {
   call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
   call <8 x float> @llvm.fabs.v8f32(<8 x float> undef)
   call <16 x float> @llvm.fabs.v16f32(<16 x float> undef)
-  call <vscale x 1 x float> @llvm.fabs.nvx1f32(<vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.fabs.nvx2f32(<vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.fabs.nvx4f32(<vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.fabs.nvx8f32(<vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.fabs.nvx16f32(<vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef)
   call double @llvm.fabs.f64(double undef)
   call <2 x double> @llvm.fabs.v2f64(<2 x double> undef)
   call <4 x double> @llvm.fabs.v4f64(<4 x double> undef)
   call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
   call <16 x double> @llvm.fabs.v16f64(<16 x double> undef)
-  call <vscale x 1 x double> @llvm.fabs.nvx1f64(<vscale x 1 x double> undef)
-  call <vscale x 2 x double> @llvm.fabs.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.fabs.nvx4f64(<vscale x 4 x double> undef)
-  call <vscale x 8 x double> @llvm.fabs.nvx8f64(<vscale x 8 x double> undef)
+  call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef)
+  call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef)
   ret void
 }
 
@@ -65,10 +65,10 @@ define void @fabs_f16() {
   call <4 x half> @llvm.fabs.v4f16(<4 x half> undef)
   call <8 x half> @llvm.fabs.v8f16(<8 x half> undef)
   call <16 x half> @llvm.fabs.v16f16(<16 x half> undef)
-  call <vscale x 2 x half> @llvm.fabs.nvx2f16(<vscale x 2 x half> undef)
-  call <vscale x 4 x half> @llvm.fabs.nvx4f16(<vscale x 4 x half> undef)
-  call <vscale x 8 x half> @llvm.fabs.nvx8f16(<vscale x 8 x half> undef)
-  call <vscale x 16 x half> @llvm.fabs.nvx16f16(<vscale x 16 x half> undef)
+  call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> undef)
   ret void
 }
 
@@ -100,20 +100,20 @@ define void @minnum() {
   call <4 x float> @llvm.minnum.v4f32(<4 x float> undef, <4 x float> undef)
   call <8 x float> @llvm.minnum.v8f32(<8 x float> undef, <8 x float> undef)
   call <16 x float> @llvm.minnum.v16f32(<16 x float> undef, <16 x float> undef)
-  call <vscale x 1 x float> @llvm.minnum.nvx1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.minnum.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.minnum.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.minnum.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.minnum.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.minnum.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
   call double @llvm.minnum.f64(double undef, double undef)
   call <2 x double> @llvm.minnum.v2f64(<2 x double> undef, <2 x double> undef)
   call <4 x double> @llvm.minnum.v4f64(<4 x double> undef, <4 x double> undef)
   call <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
   call <16 x double> @llvm.minnum.v16f64(<16 x double> undef, <16 x double> undef)
-  call <vscale x 1 x double> @llvm.minnum.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
-  call <vscale x 2 x double> @llvm.minnum.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.minnum.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
-  call <vscale x 8 x double> @llvm.minnum.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+  call <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+  call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
   ret void
 }
 
@@ -149,11 +149,11 @@ define void @minnum_f16() {
   call <4 x half> @llvm.minnum.v4f16(<4 x half> undef, <4 x half> undef)
   call <8 x half> @llvm.minnum.v8f16(<8 x half> undef, <8 x half> undef)
   call <16 x half> @llvm.minnum.v16f16(<16 x half> undef, <16 x half> undef)
-  call <vscale x 1 x half> @llvm.minnum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-  call <vscale x 2 x half> @llvm.minnum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-  call <vscale x 4 x half> @llvm.minnum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-  call <vscale x 8 x half> @llvm.minnum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-  call <vscale x 16 x half> @llvm.minnum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+  call <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+  call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
   ret void
 }
 
@@ -185,20 +185,20 @@ define void @maxnum() {
   call <4 x float> @llvm.maxnum.v4f32(<4 x float> undef, <4 x float> undef)
   call <8 x float> @llvm.maxnum.v8f32(<8 x float> undef, <8 x float> undef)
   call <16 x float> @llvm.maxnum.v16f32(<16 x float> undef, <16 x float> undef)
-  call <vscale x 1 x float> @llvm.maxnum.nvx1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.maxnum.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.maxnum.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.maxnum.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.maxnum.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.maxnum.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.maxnum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
   call double @llvm.maxnum.f64(double undef, double undef)
   call <2 x double> @llvm.maxnum.v2f64(<2 x double> undef, <2 x double> undef)
   call <4 x double> @llvm.maxnum.v4f64(<4 x double> undef, <4 x double> undef)
   call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
   call <16 x double> @llvm.maxnum.v16f64(<16 x double> undef, <16 x double> undef)
-  call <vscale x 1 x double> @llvm.maxnum.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
-  call <vscale x 2 x double> @llvm.maxnum.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.maxnum.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
-  call <vscale x 8 x double> @llvm.maxnum.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+  call <vscale x 1 x double> @llvm.maxnum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+  call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.maxnum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
   ret void
 }
 
@@ -234,11 +234,11 @@ define void @maxnum_f16() {
   call <4 x half> @llvm.maxnum.v4f16(<4 x half> undef, <4 x half> undef)
   call <8 x half> @llvm.maxnum.v8f16(<8 x half> undef, <8 x half> undef)
   call <16 x half> @llvm.maxnum.v16f16(<16 x half> undef, <16 x half> undef)
-  call <vscale x 1 x half> @llvm.maxnum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-  call <vscale x 2 x half> @llvm.maxnum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-  call <vscale x 4 x half> @llvm.maxnum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-  call <vscale x 8 x half> @llvm.maxnum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-  call <vscale x 16 x half> @llvm.maxnum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+  call <vscale x 1 x half> @llvm.maxnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+  call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.maxnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
   ret void
 }
 
@@ -270,20 +270,20 @@ define void @minimum() {
   call <4 x float> @llvm.minimum.v4f32(<4 x float> undef, <4 x float> undef)
   call <8 x float> @llvm.minimum.v8f32(<8 x float> undef, <8 x float> undef)
   call <16 x float> @llvm.minimum.v16f32(<16 x float> undef, <16 x float> undef)
-  call <vscale x 1 x float> @llvm.minimum.nvx1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.minimum.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.minimum.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.minimum.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.minimum.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.minimum.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.minimum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
   call double @llvm.minimum.f64(double undef, double undef)
   call <2 x double> @llvm.minimum.v2f64(<2 x double> undef, <2 x double> undef)
   call <4 x double> @llvm.minimum.v4f64(<4 x double> undef, <4 x double> undef)
   call <8 x double> @llvm.minimum.v8f64(<8 x double> undef, <8 x double> undef)
   call <16 x double> @llvm.minimum.v16f64(<16 x double> undef, <16 x double> undef)
-  call <vscale x 1 x double> @llvm.minimum.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
-  call <vscale x 2 x double> @llvm.minimum.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.minimum.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
-  call <vscale x 8 x double> @llvm.minimum.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+  call <vscale x 1 x double> @llvm.minimum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+  call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.minimum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.minimum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
   ret void
 }
 
@@ -319,11 +319,11 @@ define void @minimum_f16() {
   call <4 x half> @llvm.minimum.v4f16(<4 x half> undef, <4 x half> undef)
   call <8 x half> @llvm.minimum.v8f16(<8 x half> undef, <8 x half> undef)
   call <16 x half> @llvm.minimum.v16f16(<16 x half> undef, <16 x half> undef)
-  call <vscale x 1 x half> @llvm.minimum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-  call <vscale x 2 x half> @llvm.minimum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-  call <vscale x 4 x half> @llvm.minimum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-  call <vscale x 8 x half> @llvm.minimum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-  call <vscale x 16 x half> @llvm.minimum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+  call <vscale x 1 x half> @llvm.minimum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+  call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
   ret void
 }
 
@@ -355,20 +355,20 @@ define void @maximum() {
   call <4 x float> @llvm.maximum.v4f32(<4 x float> undef, <4 x float> undef)
   call <8 x float> @llvm.maximum.v8f32(<8 x float> undef, <8 x float> undef)
   call <16 x float> @llvm.maximum.v16f32(<16 x float> undef, <16 x float> undef)
-  call <vscale x 1 x float> @llvm.maximum.nvx1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.maximum.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.maximum.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.maximum.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.maximum.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.maximum.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.maximum.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.maximum.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
   call double @llvm.maximum.f64(double undef, double undef)
   call <2 x double> @llvm.maximum.v2f64(<2 x double> undef, <2 x double> undef)
   call <4 x double> @llvm.maximum.v4f64(<4 x double> undef, <4 x double> undef)
   call <8 x double> @llvm.maximum.v8f64(<8 x double> undef, <8 x double> undef)
   call <16 x double> @llvm.maximum.v16f64(<16 x double> undef, <16 x double> undef)
-  call <vscale x 1 x double> @llvm.maximum.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
-  call <vscale x 2 x double> @llvm.maximum.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.maximum.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
-  call <vscale x 8 x double> @llvm.maximum.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+  call <vscale x 1 x double> @llvm.maximum.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
+  call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
+  call <vscale x 4 x double> @llvm.maximum.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
+  call <vscale x 8 x double> @llvm.maximum.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
   ret void
 }
 
@@ -404,11 +404,11 @@ define void @maximum_f16() {
   call <4 x half> @llvm.maximum.v4f16(<4 x half> undef, <4 x half> undef)
   call <8 x half> @llvm.maximum.v8f16(<8 x half> undef, <8 x half> undef)
   call <16 x half> @llvm.maximum.v16f16(<16 x half> undef, <16 x half> undef)
-  call <vscale x 1 x half> @llvm.maximum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-  call <vscale x 2 x half> @llvm.maximum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-  call <vscale x 4 x half> @llvm.maximum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-  call <vscale x 8 x half> @llvm.maximum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-  call <vscale x 16 x half> @llvm.maximum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+  call <vscale x 1 x half> @llvm.maximum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+  call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+  call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+  call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+  call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
   ret void
 }
 
@@ -440,20 +440,20 @@ define void @copysign() {
   call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
   call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
   call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
-  call <vscale x 1 x float> @llvm.copysign.nvx1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-  call <vscale x 2 x float> @llvm.copysign.nvx2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-  call <vscale x 4 x float> @llvm.copysign.nvx4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-  call <vscale x 8 x float> @llvm.copysign.nvx8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-  call <vscale x 16 x float> @llvm.copysign.nvx16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
+  call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
+  call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
+  call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
+  call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
+  call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
   call double @llvm.copysign.f64(double undef, double undef)
   call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
   call <4 x double> @llvm.copysign.v4f64(<4 x double> un...
[truncated]
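
For context, the new suffixes follow LLVM's mangling for scalable vector types in overloaded intrinsic names: <vscale x N x ty> is spelled nxvN<ty>, which is what the patch switches the tests to use. A minimal sketch of the convention, reusing the fabs intrinsic that appears in the tests (the @example wrapper is illustrative only, not from the patch):

  ; <vscale x 4 x float> mangles to "nxv4f32", so the overloaded intrinsic is named:
  declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)

  define <vscale x 4 x float> @example(<vscale x 4 x float> %x) {
    %r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %x)
    ret <vscale x 4 x float> %r
  }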

@LiqinWeng changed the title from "[Test] Rename the test function suffix. NFC" to "[Test] Rename the test function name suffix. NFC" on Nov 1, 2024
@zqb-all
Contributor

zqb-all commented Nov 1, 2024

LGTM

@LiqinWeng merged commit c3edeaa into llvm:main on Nov 1, 2024
10 checks passed
@LiqinWeng deleted the rename-test-name branch on November 1, 2024 at 06:41
@llvm-ci
Collaborator

llvm-ci commented Nov 2, 2024

LLVM Buildbot has detected a new failure on builder clang-ppc64-aix running on aix-ppc64 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/64/builds/1351

Here is the relevant piece of the build log for reference:
Step 6 (test-build-unified-tree-check-all) failure: test (failure)
******************** TEST 'lit :: googletest-timeout.py' FAILED ********************
Exit Code: 1

Command Output (stdout):
--
# RUN: at line 9
not env -u FILECHECK_OPTS "/opt/freeware/bin/python3.9" /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit.py -j1 --order=lexical -v Inputs/googletest-timeout    --param gtest_filter=InfiniteLoopSubTest --timeout=1 > /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/Output/googletest-timeout.py.tmp.cmd.out
# executed command: not env -u FILECHECK_OPTS /opt/freeware/bin/python3.9 /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit.py -j1 --order=lexical -v Inputs/googletest-timeout --param gtest_filter=InfiniteLoopSubTest --timeout=1
# .---command stderr------------
# | lit.py: /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit/main.py:72: note: The test suite configuration requested an individual test timeout of 0 seconds but a timeout of 1 seconds was requested on the command line. Forcing timeout to be 1 seconds.
# | Traceback (most recent call last):
# |   File "/home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit/formats/googletest.py", line 304, in post_process_shard_results
# |     testsuites = json.load(f)["testsuites"]
# |   File "/opt/freeware/lib64/python3.9/json/__init__.py", line 293, in load
# |     return loads(fp.read(),
# |   File "/opt/freeware/lib64/python3.9/json/__init__.py", line 346, in loads
# |     return _default_decoder.decode(s)
# |   File "/opt/freeware/lib64/python3.9/json/decoder.py", line 337, in decode
# |     obj, end = self.raw_decode(s, idx=_w(s, 0).end())
# |   File "/opt/freeware/lib64/python3.9/json/decoder.py", line 355, in raw_decode
# |     raise JSONDecodeError("Expecting value", s, err.value) from None
# | json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
# | 
# | During handling of the above exception, another exception occurred:
# | 
# | Traceback (most recent call last):
# |   File "/home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit.py", line 6, in <module>
# |     main()
# |   File "/home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit/main.py", line 130, in main
# |     selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
# |   File "/home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/llvm-project/llvm/utils/lit/lit/formats/googletest.py", line 306, in post_process_shard_results
# |     raise RuntimeError(
# | RuntimeError: Failed to parse json file: /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py-googletest-timeout-59244926-1-2.json
# | 
# `-----------------------------
# RUN: at line 11
FileCheck --check-prefix=CHECK-INF < /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/Output/googletest-timeout.py.tmp.cmd.out /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/googletest-timeout.py
# executed command: FileCheck --check-prefix=CHECK-INF /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/googletest-timeout.py
# .---command stderr------------
# | /home/powerllvm/powerllvm_env/aix-ppc64/clang-ppc64-aix/build/utils/lit/tests/googletest-timeout.py:34:14: error: CHECK-INF: expected string not found in input
# | # CHECK-INF: Timed Out: 1
# |              ^
# | <stdin>:13:29: note: scanning from here
# | Reached timeout of 1 seconds
# |                             ^
# | <stdin>:15:21: note: possible intended match here
# | TIMEOUT: googletest-timeout :: DummySubDir/OneTest.py/1/2 (2 of 2)
# |                     ^
# | 
# | Input file: <stdin>
...

smallp-o-p pushed a commit to smallp-o-p/llvm-project that referenced this pull request Nov 3, 2024
NoumanAmir657 pushed a commit to NoumanAmir657/llvm-project that referenced this pull request Nov 4, 2024
