@@ -600,8 +600,8 @@ static Value *getMask(Value *WideMask, unsigned Factor,
 
 bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
     IntrinsicInst *DI, SmallSetVector<Instruction *, 32> &DeadInsts) {
-  Value *LoadedVal = DI->getOperand(0);
-  if (!LoadedVal->hasOneUse())
+  Instruction *LoadedVal = dyn_cast<Instruction>(DI->getOperand(0));
+  if (!LoadedVal || !LoadedVal->hasOneUse())
     return false;
 
   auto *LI = dyn_cast<LoadInst>(LoadedVal);
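
The hunk above swaps a plain `Value *` operand for an early `dyn_cast<Instruction>`, which is what lets the `cast<Instruction>` calls later in the function disappear. As a minimal hand-written sketch (not from the patch; the helper name is invented), the difference between the two LLVM casting utilities that motivates the change:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Hypothetical helper, for illustration only.
static bool demoOperandCheck(Value *V) {
  // cast<Instruction>(V) assumes success: it asserts in builds with
  // assertions enabled (and is UB without them) when V is not actually an
  // Instruction, e.g. a Constant or a function Argument.
  //
  // dyn_cast<Instruction>(V) returns nullptr on mismatch instead, so the
  // failure can be handled with an early bailout -- the pattern the patch
  // hoists to the top of lowerDeinterleaveIntrinsic.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse())
    return false;
  return true;
}
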
@@ -645,26 +645,27 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
   }
 
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerDeinterleaveIntrinsicToLoad(cast<Instruction>(LoadedVal), Mask,
-                                             DI))
+  if (!TLI->lowerDeinterleaveIntrinsicToLoad(LoadedVal, Mask, DI))
     return false;
 
   DeadInsts.insert(DI);
   // We now have a target-specific load, so delete the old one.
-  DeadInsts.insert(cast<Instruction>(LoadedVal));
+  DeadInsts.insert(LoadedVal);
   return true;
 }
 
 bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
-    IntrinsicInst *II, SmallSetVector<Instruction *, 32> &DeadInsts) {
-  if (!II->hasOneUse())
+    IntrinsicInst *IntII, SmallSetVector<Instruction *, 32> &DeadInsts) {
+  if (!IntII->hasOneUse())
+    return false;
+  Instruction *StoredBy = dyn_cast<Instruction>(IntII->user_back());
+  if (!StoredBy)
     return false;
-  Value *StoredBy = II->user_back();
   if (!isa<StoreInst, VPIntrinsic>(StoredBy))
     return false;
 
-  SmallVector<Value *, 8> InterleaveValues(II->args());
-  const unsigned Factor = getInterleaveIntrinsicFactor(II->getIntrinsicID());
+  SmallVector<Value *, 8> InterleaveValues(IntII->args());
+  const unsigned Factor = getInterleaveIntrinsicFactor(IntII->getIntrinsicID());
   assert(Factor && "unexpected interleave intrinsic");
 
   Value *Mask = nullptr;
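
The store-side rewrite applies the same idea one step earlier: `user_back()` can be treated as *the* user only because `hasOneUse()` has already been checked, and hoisting the `dyn_cast` means every later use of `StoredBy` already has `Instruction` type. A hedged sketch of that match (the helper name is invented):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Sketch: return the unique instruction consuming the interleave result,
// or nullptr if there is no such single user. hasOneUse() guarantees that
// user_back() is the sole user; the dyn_cast replaces what would otherwise
// be a cast<Instruction> at each use site.
static Instruction *getSoleUserInst(IntrinsicInst *IntII) {
  if (!IntII->hasOneUse())
    return nullptr;
  return dyn_cast<Instruction>(IntII->user_back());
}
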
@@ -679,24 +680,23 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
     return false;
 
     LLVM_DEBUG(dbgs() << "IA: Found a vp.store with interleave intrinsic "
-                      << *II << " and factor = " << Factor << "\n");
+                      << *IntII << " and factor = " << Factor << "\n");
   } else {
     auto *SI = cast<StoreInst>(StoredBy);
     if (!SI->isSimple())
       return false;
 
-    LLVM_DEBUG(dbgs() << "IA: Found a store with interleave intrinsic " << *II
-                      << " and factor = " << Factor << "\n");
+    LLVM_DEBUG(dbgs() << "IA: Found a store with interleave intrinsic "
+                      << *IntII << " and factor = " << Factor << "\n");
   }
 
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerInterleaveIntrinsicToStore(cast<Instruction>(StoredBy), Mask,
-                                            InterleaveValues))
+  if (!TLI->lowerInterleaveIntrinsicToStore(StoredBy, Mask, InterleaveValues))
     return false;
 
   // We now have a target-specific store, so delete the old one.
-  DeadInsts.insert(cast<Instruction>(StoredBy));
-  DeadInsts.insert(II);
+  DeadInsts.insert(StoredBy);
+  DeadInsts.insert(IntII);
   return true;
 }
 
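
Both paths queue the replaced instructions in `DeadInsts` rather than erasing them on the spot. A minimal sketch of that deferred-deletion idiom (assuming, as the insertion order above suggests, that the caller erases front-to-back after the IR walk; the helper is hypothetical):

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Each user is queued before the value it consumes (DI before LoadedVal,
// StoredBy before IntII), and SetVector preserves insertion order, so
// erasing front-to-back never deletes a value that still has uses.
static void eraseQueuedInsts(SmallSetVector<Instruction *, 32> &DeadInsts) {
  for (Instruction *I : DeadInsts)
    I->eraseFromParent();
}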