@@ -9927,11 +9927,11 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                            const SDLoc &dl);

 // X86 has dedicated shuffle that can be lowered to VEXPAND
-static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
-                                    const APInt &Zeroable,
-                                    ArrayRef<int> Mask, SDValue &V1,
-                                    SDValue &V2, SelectionDAG &DAG,
-                                    const X86Subtarget &Subtarget) {
+static SDValue lowerShuffleWithEXPAND(const SDLoc &DL, MVT VT, SDValue V1,
+                                      SDValue V2, ArrayRef<int> Mask,
+                                      const APInt &Zeroable,
+                                      const X86Subtarget &Subtarget,
+                                      SelectionDAG &DAG) {
   bool IsLeftZeroSide = true;
   if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
                                 IsLeftZeroSide))
@@ -15966,8 +15966,8 @@ static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,

   // If we have VLX support, we can use VEXPAND.
   if (Subtarget.hasVLX())
-    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
-                                         DAG, Subtarget))
+    if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v4f64, V1, V2, Mask,
+                                           Zeroable, Subtarget, DAG))
       return V;

   // If we have AVX2 then we always want to lower with a blend because an v4 we
@@ -16046,8 +16046,8 @@ static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                               Zeroable, Subtarget, DAG))
       return Rotate;

-    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
-                                         DAG, Subtarget))
+    if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v4i64, V1, V2, Mask,
+                                           Zeroable, Subtarget, DAG))
       return V;
   }

@@ -16184,8 +16184,8 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,

   // If we have VLX support, we can use VEXPAND.
   if (Subtarget.hasVLX())
-    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
-                                         DAG, Subtarget))
+    if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v8f32, V1, V2, Mask,
+                                           Zeroable, Subtarget, DAG))
       return V;

   // Try to match an interleave of two v8f32s and lower them as unpck and
@@ -16308,8 +16308,8 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                               Zeroable, Subtarget, DAG))
       return Rotate;

-    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
-                                         DAG, Subtarget))
+    if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v8i32, V1, V2, Mask,
+                                           Zeroable, Subtarget, DAG))
       return V;
   }

@@ -16827,8 +16827,8 @@ static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                           Zeroable, Subtarget, DAG))
     return Op;

-  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
-                                       DAG, Subtarget))
+  if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v8f64, V1, V2, Mask, Zeroable,
+                                         Subtarget, DAG))
     return V;

   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
@@ -16898,8 +16898,8 @@ static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   }

   // If we have AVX512F support, we can use VEXPAND.
-  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
-                                       V1, V2, DAG, Subtarget))
+  if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v16f32, V1, V2, Mask,
+                                         Zeroable, Subtarget, DAG))
     return V;

   return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
@@ -16967,8 +16967,8 @@ static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     return Unpck;

   // If we have AVX512F support, we can use VEXPAND.
-  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
-                                       DAG, Subtarget))
+  if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v8i64, V1, V2, Mask, Zeroable,
+                                         Subtarget, DAG))
     return V;

   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
@@ -17064,8 +17064,8 @@ static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     return V;

   // If we have AVX512F support, we can use VEXPAND.
-  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
-                                       DAG, Subtarget))
+  if (SDValue V = lowerShuffleWithEXPAND(DL, MVT::v16i32, V1, V2, Mask,
+                                         Zeroable, Subtarget, DAG))
     return V;

   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
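For context, the comment above the renamed helper refers to the expand-style shuffle: the non-zero elements of one input appear in source order and land in the lanes the shuffle mask keeps, while every zeroable lane becomes zero. Below is a minimal scalar sketch of that behavior, for illustration only; expandModel, KeepMask, and the v4 example are hypothetical names and are not part of this commit or of the LLVM sources.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative scalar model (not LLVM code) of the expand-style shuffle that
// lowerShuffleWithEXPAND matches: consecutive elements of the non-zero input
// are written, in order, into the lanes KeepMask selects; the remaining
// (zeroable) lanes stay zero, mirroring a zero-masked VEXPAND.
template <typename T, std::size_t N>
std::array<T, N> expandModel(const std::array<T, N> &Src,
                             std::uint32_t KeepMask) {
  std::array<T, N> Out{}; // zeroable lanes default to 0
  std::size_t SrcIdx = 0;
  for (std::size_t Lane = 0; Lane < N; ++Lane)
    if (KeepMask & (1u << Lane))
      Out[Lane] = Src[SrcIdx++]; // next contiguous source element, in order
  return Out;
}

int main() {
  // A v4 example with lanes 0 and 2 kept: {1, 2, _, _} -> {1, 0, 2, 0}.
  std::array<double, 4> Src = {1.0, 2.0, 3.0, 4.0};
  std::array<double, 4> Out = expandModel(Src, /*KeepMask=*/0b0101u);
  for (double D : Out)
    std::printf("%.1f ", D); // prints: 1.0 0.0 2.0 0.0
  std::printf("\n");
  return 0;
}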