@@ -34,7 +34,7 @@ use cranelift_codegen::{
    isa::{
        unwind::UnwindInst,
        x64::{
-           args::{Avx512Opcode, AvxOpcode, FenceKind, RegMemImm, XmmMemImm, CC},
+           args::{Avx512Opcode, AvxOpcode, FenceKind, CC},
            settings as x64_settings, AtomicRmwSeqOp,
        },
    },
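The import change above mirrors the theme of the whole commit: the generic `xmm_rmi_rvex` emitter, whose second source was a `RegMemImm` wrapped in `XmmMemImm`, is replaced by helpers whose names encode the operand shape, so the wrapper types are no longer needed. A minimal sketch of the assumed new entry points, inferred purely from the call sites below (stub types stand in for the real cranelift/winch definitions; the actual signatures are not shown in this diff):

    // Sketch only: `Reg`, `WritableReg`, and `AvxOpcode` are stubs, and the
    // bodies are elided; shapes are inferred from the call sites in this diff.
    #[derive(Clone, Copy)]
    struct Reg(u8);
    #[derive(Clone, Copy)]
    struct WritableReg(Reg);
    enum AvxOpcode {
        Vpxor,
        Vpsrlq,
    }

    struct Assembler;
    impl Assembler {
        /// Register-register form: dst = op(src1, src2).
        fn xmm_vex_rr(&mut self, _op: AvxOpcode, _src1: Reg, _src2: Reg, _dst: WritableReg) {}
        /// Register-immediate form: dst = op(src, imm).
        fn xmm_vex_ri(&mut self, _op: AvxOpcode, _src: Reg, _imm: u32, _dst: WritableReg) {}
    }

    fn main() {
        let mut asm = Assembler;
        let (a, b) = (Reg(0), Reg(1));
        asm.xmm_vex_rr(AvxOpcode::Vpxor, a, b, WritableReg(a));
        asm.xmm_vex_ri(AvxOpcode::Vpsrlq, b, 32, WritableReg(b));
    }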
@@ -1655,7 +1655,7 @@ impl Masm for MacroAssembler {
                    .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                self.asm
                    .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-               self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+               self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
            }
            VectorEqualityKind::F32x4 | VectorEqualityKind::F64x2 => {
                self.asm
@@ -1694,7 +1694,7 @@ impl Masm for MacroAssembler {
                    .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                self.asm
                    .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-               self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+               self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
            }
            VectorCompareKind::F32x4 | VectorCompareKind::F64x2 => {
                self.asm
@@ -1727,7 +1727,7 @@ impl Masm for MacroAssembler {
                    .xmm_vpcmpgt_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                self.asm
                    .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-               self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+               self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
            }
            VectorCompareKind::I8x16U | VectorCompareKind::I16x8U | VectorCompareKind::I32x4U => {
                // Set the `rhs` vector to the signed minimum values and then
@@ -1772,7 +1772,7 @@ impl Masm for MacroAssembler {
                    .xmm_vpcmpeq_rrr(writable!(lhs), lhs, rhs, kind.lane_size());
                self.asm
                    .xmm_vpcmpeq_rrr(writable!(rhs), rhs, rhs, kind.lane_size());
-               self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, lhs, rhs, dst);
+               self.asm.xmm_vex_rr(AvxOpcode::Vpxor, lhs, rhs, dst);
            }
            VectorCompareKind::F32x4 | VectorCompareKind::F64x2 => {
                // Do a less than comparison with the operands swapped.
@@ -1806,7 +1806,7 @@ impl Masm for MacroAssembler {
                    .xmm_vpcmpgt_rrr(writable!(rhs), rhs, lhs, kind.lane_size());
                self.asm.xmm_vpcmpeq_rrr(dst, lhs, lhs, kind.lane_size());
                self.asm
-                   .xmm_rmi_rvex(AvxOpcode::Vpxor, dst.to_reg(), rhs, dst);
+                   .xmm_vex_rr(AvxOpcode::Vpxor, dst.to_reg(), rhs, dst);
            }
            VectorCompareKind::I8x16U | VectorCompareKind::I16x8U | VectorCompareKind::I32x4U => {
                // Set lanes to maximum values and compare them for equality.
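All of the comparison arms touched above share one trick: `vpcmpeq` of a register with itself sets every lane to all ones, and XOR-ing a comparison mask with all ones negates it. That is how `ne` is built from `eq`, and `ge` from a swapped `gt`. A scalar sketch of the identity, with one `u32` standing in for one lane (illustrative only, not the emitted code):

    fn main() {
        let (lhs, rhs) = (5u32, 7u32);

        // A per-lane eq mask: all ones if equal, all zeros otherwise.
        let eq = if lhs == rhs { u32::MAX } else { 0 };
        // vpcmpeq(x, x) always yields all ones.
        let all_ones = u32::MAX;

        // ne = eq XOR all-ones.
        let ne = eq ^ all_ones;
        assert_eq!(ne, u32::MAX); // 5 != 7, so the lane is all ones

        // ge = NOT(gt(rhs, lhs)) = gt(rhs, lhs) XOR all-ones.
        let gt_swapped = if rhs > lhs { u32::MAX } else { 0 };
        let ge = gt_swapped ^ all_ones;
        assert_eq!(ge, 0); // 5 >= 7 is false
    }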
@@ -1835,34 +1835,34 @@ impl Masm for MacroAssembler {
        let tmp = regs::scratch_xmm();
        // First, we initialize `tmp` with all ones, by comparing it with itself.
        self.asm
-           .xmm_rmi_rvex(AvxOpcode::Vpcmpeqd, tmp, tmp, writable!(tmp));
+           .xmm_vex_rr(AvxOpcode::Vpcmpeqd, tmp, tmp, writable!(tmp));
        // then we `xor` tmp and `dst` together, yielding `!dst`.
        self.asm
-           .xmm_rmi_rvex(AvxOpcode::Vpxor, tmp, dst.to_reg(), dst);
+           .xmm_vex_rr(AvxOpcode::Vpxor, tmp, dst.to_reg(), dst);
        Ok(())
    }

    fn v128_and(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
        self.ensure_has_avx()?;
-       self.asm.xmm_rmi_rvex(AvxOpcode::Vpand, src1, src2, dst);
+       self.asm.xmm_vex_rr(AvxOpcode::Vpand, src1, src2, dst);
        Ok(())
    }

    fn v128_and_not(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
        self.ensure_has_avx()?;
-       self.asm.xmm_rmi_rvex(AvxOpcode::Vpandn, src1, src2, dst);
+       self.asm.xmm_vex_rr(AvxOpcode::Vpandn, src1, src2, dst);
        Ok(())
    }

    fn v128_or(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
        self.ensure_has_avx()?;
-       self.asm.xmm_rmi_rvex(AvxOpcode::Vpor, src1, src2, dst);
+       self.asm.xmm_vex_rr(AvxOpcode::Vpor, src1, src2, dst);
        Ok(())
    }

    fn v128_xor(&mut self, src1: Reg, src2: Reg, dst: WritableReg) -> Result<()> {
        self.ensure_has_avx()?;
-       self.asm.xmm_rmi_rvex(AvxOpcode::Vpxor, src1, src2, dst);
+       self.asm.xmm_vex_rr(AvxOpcode::Vpxor, src1, src2, dst);
        Ok(())
    }

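`v128_not` above reuses the same all-ones identity: a self-compare fills the scratch register, and one XOR then flips every bit of `dst`. A caveat worth noting for `v128_and_not`: x86's `pandn`/`vpandn` negates its *first* source, computing `(!first) & second`, so operand order at these call sites is significant (which of `src1`/`src2` winch negates is not visible in this hunk). A scalar sketch of both identities:

    fn main() {
        let x = 0x00ff_00ffu32;

        // vpcmpeqd(tmp, tmp) -> all ones; x ^ all-ones == !x.
        let all_ones = u32::MAX;
        assert_eq!(x ^ all_ones, !x);

        // pandn computes (!first) & second: the negated operand is the
        // first source, not the second.
        let pandn = |first: u32, second: u32| !first & second;
        assert_eq!(pandn(0x0f0f, 0x00ff), 0x00f0);
    }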
@@ -1913,7 +1913,7 @@ impl Masm for MacroAssembler {
            },
        };

-       self.asm.xmm_rmi_rvex(op, lhs, rhs, dst);
+       self.asm.xmm_vex_rr(op, lhs, rhs, dst);

        Ok(())
    }
@@ -1948,7 +1948,7 @@ impl Masm for MacroAssembler {
            },
        };

-       self.asm.xmm_rmi_rvex(op, lhs, rhs, dst);
+       self.asm.xmm_vex_rr(op, lhs, rhs, dst);

        Ok(())
    }
@@ -1965,7 +1965,7 @@ impl Masm for MacroAssembler {

        let mul_avx = |this: &mut Self, op| {
            this.asm
-               .xmm_rmi_rvex(op, lhs.reg, rhs.reg, writable!(lhs.reg));
+               .xmm_vex_rr(op, lhs.reg, rhs.reg, writable!(lhs.reg));
        };

        let mul_i64x2_avx512 = |this: &mut Self| {
@@ -2004,48 +2004,36 @@ impl Masm for MacroAssembler {
            let tmp2 = context.any_fpr(this)?;

            // tmp1 = lhs_hi = (lhs >> 32)
-           this.asm.xmm_rmi_rvex(
-               AvxOpcode::Vpsrlq,
-               lhs.reg,
-               XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-               writable!(tmp1),
-           );
+           this.asm
+               .xmm_vex_ri(AvxOpcode::Vpsrlq, lhs.reg, 32, writable!(tmp1));

            // tmp2 = lhs_hi * rhs_low = tmp1 * rhs
            this.asm
-               .xmm_rmi_rvex(AvxOpcode::Vpmuldq, tmp1, rhs.reg, writable!(tmp2));
+               .xmm_vex_rr(AvxOpcode::Vpmuldq, tmp1, rhs.reg, writable!(tmp2));

            // tmp1 = rhs_hi = rhs >> 32
-           this.asm.xmm_rmi_rvex(
-               AvxOpcode::Vpsrlq,
-               rhs.reg,
-               XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-               writable!(tmp1),
-           );
+           this.asm
+               .xmm_vex_ri(AvxOpcode::Vpsrlq, rhs.reg, 32, writable!(tmp1));

            // tmp1 = lhs_low * rhs_high = tmp1 * lhs
            this.asm
-               .xmm_rmi_rvex(AvxOpcode::Vpmuludq, tmp1, lhs.reg, writable!(tmp1));
+               .xmm_vex_rr(AvxOpcode::Vpmuludq, tmp1, lhs.reg, writable!(tmp1));

            // tmp1 = ((lhs_hi * rhs_low) + (lhs_lo * rhs_hi)) = tmp1 + tmp2
            this.asm
-               .xmm_rmi_rvex(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(tmp1));
+               .xmm_vex_rr(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(tmp1));

            // tmp1 = tmp1 << 32
-           this.asm.xmm_rmi_rvex(
-               AvxOpcode::Vpsllq,
-               tmp1,
-               XmmMemImm::unwrap_new(RegMemImm::imm(32)),
-               writable!(tmp1),
-           );
+           this.asm
+               .xmm_vex_ri(AvxOpcode::Vpsllq, tmp1, 32, writable!(tmp1));

            // tmp2 = lhs_lo * rhs_lo
            this.asm
-               .xmm_rmi_rvex(AvxOpcode::Vpmuludq, lhs.reg, rhs.reg, writable!(tmp2));
+               .xmm_vex_rr(AvxOpcode::Vpmuludq, lhs.reg, rhs.reg, writable!(tmp2));

            // finally, with `lhs` as destination:
            // lhs = (lhs_low * rhs_low) + ((lhs_hi * rhs_low) + (lhs_lo * rhs_hi)) = tmp1 + tmp2
            this.asm
-               .xmm_vex_rr(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(lhs.reg));
+               .xmm_vex_rr(AvxOpcode::Vpaddq, tmp1, tmp2, writable!(lhs.reg));

            context.free_reg(tmp2);
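The fallback rewritten above exists because, without AVX-512's `vpmullq`, x86 has no packed 64-bit low multiply, so the product is assembled from 32x32->64 pieces: low64(a*b) = a_lo*b_lo + ((a_hi*b_lo + a_lo*b_hi) << 32). Since the cross terms are shifted left by 32, only their low halves survive, which is also why mixing `Vpmuldq` (signed) and `Vpmuludq` (unsigned) above is harmless. A scalar model of the same decomposition, checked against Rust's native wrapping multiply:

    // Scalar model of the i64x2 multiply decomposition emitted above.
    fn mul_low64(a: u64, b: u64) -> u64 {
        let (a_lo, a_hi) = (a & 0xffff_ffff, a >> 32);
        let (b_lo, b_hi) = (b & 0xffff_ffff, b >> 32);

        // Cross terms: only their low 32 bits survive the shift by 32.
        let cross = a_hi.wrapping_mul(b_lo).wrapping_add(a_lo.wrapping_mul(b_hi));

        a_lo.wrapping_mul(b_lo).wrapping_add(cross << 32)
    }

    fn main() {
        let cases = [
            (3u64, 5u64),
            (u64::MAX, 2),
            (0xdead_beef_1234_5678, 0x9abc_def0_0fed_cba9),
        ];
        for &(a, b) in &cases {
            assert_eq!(mul_low64(a, b), a.wrapping_mul(b));
        }
    }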