@@ -466,7 +466,7 @@ llvm::Value *CodeGenFunction::EmitFP8NeonFDOTCall(
   if (ExtendLaneArg) {
     auto *VT = llvm::FixedVectorType::get(Int8Ty, 16);
     Ops[2] = Builder.CreateInsertVector(VT, PoisonValue::get(VT), Ops[2],
-                                        Builder.getInt64(0));
+                                        uint64_t(0));
   }
   return EmitFP8NeonCall(IID, Tys, Ops, E, name);
 }
@@ -478,7 +478,7 @@ llvm::Value *CodeGenFunction::EmitFP8NeonFMLACall(
   if (ExtendLaneArg) {
     auto *VT = llvm::FixedVectorType::get(Int8Ty, 16);
     Ops[2] = Builder.CreateInsertVector(VT, PoisonValue::get(VT), Ops[2],
-                                        Builder.getInt64(0));
+                                        uint64_t(0));
   }
   const unsigned ElemCount = Ops[0]->getType()->getPrimitiveSizeInBits() /
                              RetTy->getPrimitiveSizeInBits();
@@ -502,7 +502,7 @@ Value *CodeGenFunction::EmitFP8NeonCvtCall(unsigned IID, llvm::Type *Ty0,
     // Op[0] is mfloat8x16_t, but the intrinsic converts only the lower part of
     // the vector.
     Tys[1] = llvm::FixedVectorType::get(Int8Ty, 8);
-    Ops[0] = Builder.CreateExtractVector(Tys[1], Ops[0], Builder.getInt64(0));
+    Ops[0] = Builder.CreateExtractVector(Tys[1], Ops[0], uint64_t(0));
   }
   return EmitFP8NeonCall(IID, Tys, Ops, E, name);
 }
@@ -4727,7 +4727,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
 
     llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
     Value *InsertSubVec = Builder.CreateInsertVector(
-        OverloadedTy, PoisonValue::get(OverloadedTy), Vec, Builder.getInt64(0));
+        OverloadedTy, PoisonValue::get(OverloadedTy), Vec, uint64_t(0));
 
     Function *F =
         CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
@@ -4810,7 +4810,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
   case SVE::BI__builtin_sve_svset_neonq_f32:
   case SVE::BI__builtin_sve_svset_neonq_f64:
   case SVE::BI__builtin_sve_svset_neonq_bf16: {
-    return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
+    return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], uint64_t(0));
   }
 
   case SVE::BI__builtin_sve_svget_neonq_s8:
@@ -4825,7 +4825,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
   case SVE::BI__builtin_sve_svget_neonq_f32:
   case SVE::BI__builtin_sve_svget_neonq_f64:
   case SVE::BI__builtin_sve_svget_neonq_bf16: {
-    return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
+    return Builder.CreateExtractVector(Ty, Ops[0], uint64_t(0));
   }
 
   case SVE::BI__builtin_sve_svdup_neonq_s8:
@@ -4841,7 +4841,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
   case SVE::BI__builtin_sve_svdup_neonq_f64:
   case SVE::BI__builtin_sve_svdup_neonq_bf16: {
     Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
-                                               Builder.getInt64(0));
+                                               uint64_t(0));
     return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
                                    {Insert, Builder.getInt64(0)});
   }
@@ -7767,7 +7767,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case NEON::BI__builtin_neon_vcvt_high_mf8_f32_fpm: {
     llvm::Type *Ty = llvm::FixedVectorType::get(Int8Ty, 16);
     Ops[0] = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
-                                        Builder.getInt64(0));
+                                        uint64_t(0));
     return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn2, Ty,
                               Ops[1]->getType(), false, Ops, E, "vfcvtn2");
   }
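Every hunk above makes the same substitution: the subvector index handed to IRBuilder's CreateInsertVector/CreateExtractVector changes from a Builder.getInt64(0) Value* to a plain uint64_t(0), so overload resolution picks the builder variants that take the index as an immediate. The standalone sketch below is not part of the patch; it only illustrates that call pattern. It assumes an LLVM revision that provides the uint64_t-index overloads (as the tree this patch targets does), and the module and function names ("demo", "f") are made up for the example.

// Hypothetical demo of the uint64_t-index call pattern, not patch code.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> Builder(Ctx);

  // Declare "void @f(<8 x i8>)" and give it an entry block to build into.
  auto *V8I8 = FixedVectorType::get(Builder.getInt8Ty(), 8);
  auto *V16I8 = FixedVectorType::get(Builder.getInt8Ty(), 16);
  Function *F = Function::Create(
      FunctionType::get(Builder.getVoidTy(), {V8I8}, /*isVarArg=*/false),
      Function::ExternalLinkage, "f", M);
  Builder.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  // Insert the <8 x i8> argument into the low half of a poison <16 x i8>,
  // passing the index as an immediate uint64_t instead of Builder.getInt64(0).
  Value *Widened = Builder.CreateInsertVector(
      V16I8, PoisonValue::get(V16I8), F->getArg(0), uint64_t(0));

  // Read the low half back out with the matching extract overload.
  Value *Lo = Builder.CreateExtractVector(V8I8, Widened, uint64_t(0));
  (void)Lo;

  Builder.CreateRetVoid();
  // Prints a module containing llvm.vector.insert/extract calls with an
  // i64 0 index operand.
  M.print(outs(), nullptr);
}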
0 commit comments