From d73abb30a4dfe3b32f025db1e0b69e8d655be962 Mon Sep 17 00:00:00 2001
From: Mohammad Shahid
Date: Wed, 20 Dec 2017 15:26:59 +0000
Subject: Revert r320548:[SLP] Vectorize jumbled memory loads

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@321181 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Analysis/LoopAccessAnalysis.cpp | 71 ------------------------------------
 1 file changed, 71 deletions(-)

(limited to 'lib/Analysis/LoopAccessAnalysis.cpp')

diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index ed8e5e8cc48..e141d6c58b6 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1107,77 +1107,6 @@ static unsigned getAddressSpaceOperand(Value *I) {
   return -1;
 }
 
-// TODO:This API can be improved by using the permutation of given width as the
-// accesses are entered into the map.
-bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                            ScalarEvolution &SE,
-                            SmallVectorImpl<Value *> &Sorted,
-                            SmallVectorImpl<unsigned> *Mask) {
-  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
-  OffValPairs.reserve(VL.size());
-  Sorted.reserve(VL.size());
-
-  // Walk over the pointers, and map each of them to an offset relative to
-  // first pointer in the array.
-  Value *Ptr0 = getPointerOperand(VL[0]);
-  const SCEV *Scev0 = SE.getSCEV(Ptr0);
-  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
-  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
-  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
-
-  for (auto *Val : VL) {
-    // The only kind of access we care about here is load.
-    if (!isa<LoadInst>(Val))
-      return false;
-
-    Value *Ptr = getPointerOperand(Val);
-    assert(Ptr && "Expected value to have a pointer operand.");
-    // If a pointer refers to a different underlying object, bail - the
-    // pointers are by definition incomparable.
-    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
-    if (CurrObj != Obj0)
-      return false;
-
-    const SCEVConstant *Diff =
-        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
-    // The pointers may not have a constant offset from each other, or SCEV
-    // may just not be smart enough to figure out they do. Regardless,
-    // there's nothing we can do.
-    if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
-                     (VL.size() - 1) * Size)
-      return false;
-
-    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
-  }
-  SmallVector<unsigned, 4> UseOrder(VL.size());
-  for (unsigned i = 0; i < VL.size(); i++) {
-    UseOrder[i] = i;
-  }
-
-  // Sort the memory accesses and keep the order of their uses in UseOrder.
-  std::sort(UseOrder.begin(), UseOrder.end(),
-            [&OffValPairs](unsigned Left, unsigned Right) {
-              return OffValPairs[Left].first < OffValPairs[Right].first;
-            });
-
-  for (unsigned i = 0; i < VL.size(); i++)
-    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
-
-  // Sort UseOrder to compute the Mask.
-  if (Mask) {
-    Mask->reserve(VL.size());
-    for (unsigned i = 0; i < VL.size(); i++)
-      Mask->emplace_back(i);
-    std::sort(Mask->begin(), Mask->end(),
-              [&UseOrder](unsigned Left, unsigned Right) {
-                return UseOrder[Left] < UseOrder[Right];
-              });
-  }
-
-  return true;
-}
-
-
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
--
cgit v1.2.3
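
For illustration only (not part of the patch above): a minimal standalone sketch of the sort-by-offset-plus-permutation-mask idea that the reverted sortLoadAccesses implemented. It uses plain std containers in place of LLVM's ArrayRef/SmallVector and skips the SCEV offset computation; the Access struct and sortByOffset name are hypothetical, not LLVM API.

    // Sketch: given accesses with byte offsets relative to the first pointer,
    // return them sorted by offset and a mask mapping each original position
    // to its position in the sorted order (the "jumble" permutation).
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    struct Access {
      int64_t Offset; // byte offset from the first access (assumed precomputed)
      int Id;         // stands in for the load instruction
    };

    std::vector<Access> sortByOffset(const std::vector<Access> &Accesses,
                                     std::vector<unsigned> &Mask) {
      // UseOrder[j] = original index of the access placed at sorted position j.
      std::vector<unsigned> UseOrder(Accesses.size());
      std::iota(UseOrder.begin(), UseOrder.end(), 0u);
      std::sort(UseOrder.begin(), UseOrder.end(),
                [&Accesses](unsigned L, unsigned R) {
                  return Accesses[L].Offset < Accesses[R].Offset;
                });

      std::vector<Access> Sorted;
      Sorted.reserve(Accesses.size());
      for (unsigned Idx : UseOrder)
        Sorted.push_back(Accesses[Idx]);

      // Invert the permutation: Mask[original position] = sorted position.
      Mask.assign(Accesses.size(), 0);
      for (unsigned Pos = 0; Pos < UseOrder.size(); ++Pos)
        Mask[UseOrder[Pos]] = Pos;
      return Sorted;
    }

    int main() {
      // Jumbled 4-byte loads at offsets 8, 0, 12, 4.
      std::vector<Access> Jumbled = {{8, 0}, {0, 1}, {12, 2}, {4, 3}};
      std::vector<unsigned> Mask;
      std::vector<Access> Sorted = sortByOffset(Jumbled, Mask);

      for (const Access &A : Sorted)
        std::cout << "offset " << A.Offset << " (id " << A.Id << ")\n";
      for (unsigned M : Mask)          // prints 2 0 3 1 for this input
        std::cout << M << ' ';
      std::cout << '\n';
      return 0;
    }

The reverted helper built the same mask by sorting an index vector with the comparison UseOrder[Left] < UseOrder[Right], which likewise yields the inverse of UseOrder; the sketch inverts the permutation directly for clarity.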