@@ -638,6 +638,7 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
   if (!LI->isSimple() || !LI->hasOneUse() || LI->getParent() != SI->getParent())
     return false;
 
+  BatchAAResults BAA(*AA);
   auto *T = LI->getType();
   // Don't introduce calls to memcpy/memmove intrinsics out of thin air if
   // the corresponding libcalls are not available.
@@ -653,12 +654,11 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
   // We use MSSA to check if an instruction may store to the memory we load
   // from in between the load and the store. If such an instruction is found,
   // we try to promote there instead of at the store position.
-  BatchAAResults BAA(*AA);
-  auto *Clobber =
-      cast<MemoryUseOrDef>(MSSA->getWalker()->getClobberingMemoryAccess(
-          StoreAccess->getDefiningAccess(), LoadLoc, BAA));
-  Instruction *P =
-      MSSA->dominates(LoadAccess, Clobber) ? Clobber->getMemoryInst() : SI;
+  auto *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
+      StoreAccess->getDefiningAccess(), LoadLoc, BAA);
+  Instruction *P = MSSA->dominates(LoadAccess, Clobber)
+                       ? cast<MemoryUseOrDef>(Clobber)->getMemoryInst()
+                       : SI;
 
   // If we found an instruction that may write to the loaded memory,
   // we can try to promote at this position instead of the store
@@ -706,7 +706,6 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
   // Detect cases where we're performing call slot forwarding, but
   // happen to be using a load-store pair to implement it, rather than
   // a memcpy.
-  BatchAAResults BAA(*AA);
   auto GetCall = [&]() -> CallInst * {
     // We defer this expensive clobber walk until the cheap checks
     // have been done on the source inside performCallSlotOptzn.
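
For context, a minimal sketch of the pattern these hunks converge on: one BatchAAResults is constructed near the top of processStoreOfLoad and reused both for the MSSA clobber walk and, further down, for the call slot forwarding check, instead of building a fresh one at each use site. The helper below only restates the promotion-point logic under assumed names; the standalone function shape and the name findPromotionPoint are illustrative, not the actual pass member function.

```cpp
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: mirrors the load/store promotion-point walk from the diff
// above, with one BatchAAResults shared by all alias queries it issues.
static Instruction *findPromotionPoint(AAResults &AA, MemorySSA &MSSA,
                                       StoreInst *SI, LoadInst *LI) {
  // Built once for this transform attempt; every query below reuses it.
  BatchAAResults BAA(AA);

  MemoryUseOrDef *LoadAccess = MSSA.getMemoryAccess(LI);
  MemoryUseOrDef *StoreAccess = MSSA.getMemoryAccess(SI);
  MemoryLocation LoadLoc = MemoryLocation::get(LI);

  // Walk from the store's defining access to whatever may clobber the
  // loaded memory between the load and the store.
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(
      StoreAccess->getDefiningAccess(), LoadLoc, BAA);

  // Promote at the clobbering instruction only if the load's access
  // dominates it; otherwise fall back to the store position.
  return MSSA.dominates(LoadAccess, Clobber)
             ? cast<MemoryUseOrDef>(Clobber)->getMemoryInst()
             : SI;
}
```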