+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64LE
+
+; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \
+; RUN:   -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64
+
+; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc-ibm-aix \
+; RUN:   -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_32
+
+; Currently the generated code uses `vspltisw` to build a vector of 1s, followed by an add.
+; A future patch is expected to optimize this pattern by using `xxleqv` to build a vector of -1s,
+; followed by a subtract.
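+;
+; A rough sketch of the sequence the future optimization is expected to produce; the exact
+; register choices and instruction selection are assumptions, not output generated by the
+; update script:
+;   xxleqv  v3, v3, v3     ; all bits set, i.e. -1 in every word lane
+;   vsubuwm v2, v2, v3     ; a - (-1) == a + 1
+;   blr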
+define dso_local noundef <4 x i32> @test1(<4 x i32> %a) {
+; POWERPC_64LE-LABEL: test1:
+; POWERPC_64LE: # %bb.0: # %entry
+; POWERPC_64LE-NEXT: vspltisw v3, 1
+; POWERPC_64LE-NEXT: vadduwm v2, v2, v3
+; POWERPC_64LE-NEXT: blr
+;
+; POWERPC_64-LABEL: test1:
+; POWERPC_64: # %bb.0: # %entry
+; POWERPC_64-NEXT: vspltisw v3, 1
+; POWERPC_64-NEXT: vadduwm v2, v2, v3
+; POWERPC_64-NEXT: blr
+;
+; POWERPC_32-LABEL: test1:
+; POWERPC_32: # %bb.0: # %entry
+; POWERPC_32-NEXT: vspltisw v3, 1
+; POWERPC_32-NEXT: vadduwm v2, v2, v3
+; POWERPC_32-NEXT: blr
+entry:
+  %add = add <4 x i32> %a, splat (i32 1)
+  ret <4 x i32> %add
+}