@@ -1253,7 +1253,7 @@ static bool structContainsExactlyOneFieldThatIsACapability(llvm::StructType* STy
 /// destination type; in this situation the values of bits which not
 /// present in the src are undefined.
 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
-                                      CodeGenFunction &CGF) {
+                                      QualType CType, CodeGenFunction &CGF) {
   llvm::Type *SrcTy = Src.getElementType();
 
   // If SrcTy and Ty are the same, just do a load.
@@ -1325,10 +1325,12 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   // Otherwise do coercion through memory. This is stupid, but simple.
   Address Tmp =
       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
-  CGF.Builder.CreateMemCpy(
-      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
-      Src.getAlignment().getAsAlign(),
-      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
+  auto *Size = llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize());
+  auto PreserveTags = CGF.getTypes().copyShouldPreserveTagsForPointee(
+      CType, /*EffectiveTypeKnown=*/true, Size);
+  CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+                           Src.getPointer(), Src.getAlignment().getAsAlign(),
+                           Size, PreserveTags);
   return CGF.Builder.CreateLoad(Tmp);
 }
 
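For readers outside Clang, the fallback path touched above is a byte-level round trip: the source value is copied into a temporary sized for the coerced type and then reloaded as that type. Below is a minimal standalone sketch of that idea using hypothetical stand-in types (SrcRepr, CoercedTy) and plain memcpy rather than Clang's Address/llvm::Type machinery; it is an illustration, not the patched code.

// Standalone model of "coercion through memory": copy the source bytes into
// a zero-initialized temporary of the coerced type, then return that value.
#include <cstdint>
#include <cstring>

struct SrcRepr {                 // hypothetical in-memory representation
  std::uint32_t Lo, Hi;
};
using CoercedTy = std::uint64_t; // hypothetical ABI register type

CoercedTy coerceThroughMemory(const SrcRepr &Src) {
  CoercedTy Tmp = 0;             // plays the role of the temporary alloca
  // Copy the smaller of the two sizes so neither object is over-read/written.
  std::memcpy(&Tmp, &Src,
              sizeof(SrcRepr) < sizeof(CoercedTy) ? sizeof(SrcRepr)
                                                  : sizeof(CoercedTy));
  return Tmp;                    // plays the role of the final CreateLoad
}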
@@ -1356,10 +1358,8 @@ void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
 ///
 /// This safely handles the case when the src type is larger than the
 /// destination type; the upper bits of the src will be lost.
-static void CreateCoercedStore(llvm::Value *Src,
-                               Address Dst,
-                               bool DstIsVolatile,
-                               CodeGenFunction &CGF) {
+static void CreateCoercedStore(llvm::Value *Src, Address Dst, QualType CType,
+                               bool DstIsVolatile, CodeGenFunction &CGF) {
   llvm::Type *SrcTy = Src->getType();
   llvm::Type *DstTy = Dst.getElementType();
   if (SrcTy == DstTy) {
@@ -1429,12 +1429,14 @@ static void CreateCoercedStore(llvm::Value *Src,
     //
     // FIXME: Assert that we aren't truncating non-padding bits when have access
     // to that information.
+    auto *Size = llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize());
+    auto PreserveTags = CGF.getTypes().copyShouldPreserveTagsForPointee(
+        CType, /*EffectiveTypeKnown=*/true, Size);
     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
     CGF.Builder.CreateStore(Src, Tmp);
-    CGF.Builder.CreateMemCpy(
-        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
-        Tmp.getAlignment().getAsAlign(),
-        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
+    CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
+                             Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+                             Size, PreserveTags);
   }
 }
 
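The behavioural change in both helpers is that the coercion memcpy now carries a tag-preservation hint derived from the C-level type (CType) via copyShouldPreserveTagsForPointee. The sketch below is only a rough standalone model of such a decision under an assumed rule (tags matter only when the copied type may hold capabilities and the copy is at least one capability wide); it is not the actual Clang/CHERI implementation of that query.

// Hypothetical model of a tag-preservation decision for a coercion copy.
#include <cstdint>

enum class PreserveTags { Required, Unnecessary };

struct TypeInfo {                  // assumed summary of the copied C type
  bool MayContainCapabilities;     // e.g. contains pointers in a CHERI ABI
};

PreserveTags copyShouldPreserveTagsModel(const TypeInfo &T,
                                         std::uint64_t CopySizeBytes,
                                         std::uint64_t CapSizeBytes) {
  if (!T.MayContainCapabilities)
    return PreserveTags::Unnecessary; // plain data: no tags can be live
  if (CopySizeBytes < CapSizeBytes)
    return PreserveTags::Unnecessary; // too small to carry a whole capability
  return PreserveTags::Required;      // otherwise the copy must keep tags
}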
@@ -2975,7 +2977,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
         assert(NumIRArgs == 1);
         auto AI = Fn->getArg(FirstIRArg);
         AI->setName(Arg->getName() + ".coerce");
-        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
+        CreateCoercedStore(AI, Ptr, Ty, /*DstIsVolatile=*/false, *this);
       }
 
       // Match to what EmitParmDecl is expecting for this type.
@@ -3561,7 +3563,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
       // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
 
-      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
+      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), RetTy, *this);
     }
 
     // In ARC, end functions that return a retainable type with a call
@@ -4782,6 +4784,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
        I != E; ++I, ++info_it, ++ArgNo) {
     const ABIArgInfo &ArgInfo = info_it->info;
+    QualType ArgType = info_it->type;
 
     // Insert a padding argument to ensure proper alignment.
     if (IRFunctionArgs.hasPaddingArg(ArgNo))
@@ -5035,7 +5038,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         // In the simple case, just pass the coerced loaded value.
         assert(NumIRArgs == 1);
         llvm::Value *Load =
-            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), ArgType, *this);
 
         if (CallInfo.isCmseNSCall()) {
           // For certain parameter types, clear padding bits, as they may reveal
@@ -5573,7 +5576,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
 
       // If the value is offset in memory, apply the offset now.
       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
-      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+      CreateCoercedStore(CI, StorePtr, RetTy, DestIsVolatile, *this);
 
       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
     }