summaryrefslogtreecommitdiff
path: root/lib/Target/AArch64
diff options
context:
space:
mode:
authorAbderrazek Zaafrani <a.zaafrani@samsung.com>2017-12-08 00:58:49 +0000
committerAbderrazek Zaafrani <a.zaafrani@samsung.com>2017-12-08 00:58:49 +0000
commit94240acddc937163335dbd919f2d6833bd9079b0 (patch)
treef42f590852c335e3c706f7cfa6acd273e7ef0bcc /lib/Target/AArch64
parented1cb75099f89e02b0e4a2d90f1cf31572c8a9dd (diff)
[AArch64] Avoid SIMD interleaved store instruction for Exynos.
Replace interleaved store instructions by equivalent and more efficient instructions based on latency cost model. https://reviews.llvm.org/D38196 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320123 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/AArch64')
-rw-r--r--lib/Target/AArch64/AArch64.h4
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp4
-rw-r--r--lib/Target/AArch64/AArch64VectorByElementOpt.cpp583
3 files changed, 469 insertions, 122 deletions
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index 1dda746a6be..edda13ce97e 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -39,7 +39,7 @@ FunctionPass *createAArch64ISelDag(AArch64TargetMachine &TM,
FunctionPass *createAArch64StorePairSuppressPass();
FunctionPass *createAArch64ExpandPseudoPass();
FunctionPass *createAArch64LoadStoreOptimizationPass();
-FunctionPass *createAArch64VectorByElementOptPass();
+FunctionPass *createAArch64SIMDInstrOptPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64ConditionOptimizerPass();
FunctionPass *createAArch64A57FPLoadBalancing();
@@ -64,7 +64,7 @@ void initializeAArch64ConditionOptimizerPass(PassRegistry&);
void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&);
void initializeAArch64ExpandPseudoPass(PassRegistry&);
void initializeAArch64LoadStoreOptPass(PassRegistry&);
-void initializeAArch64VectorByElementOptPass(PassRegistry&);
+void initializeAArch64SIMDInstrOptPass(PassRegistry&);
void initializeAArch64PromoteConstantPass(PassRegistry&);
void initializeAArch64RedundantCopyEliminationPass(PassRegistry&);
void initializeAArch64StorePairSuppressPass(PassRegistry&);
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index 2cf0a49896e..21b8bfff52c 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -157,7 +157,7 @@ extern "C" void LLVMInitializeAArch64Target() {
initializeAArch64DeadRegisterDefinitionsPass(*PR);
initializeAArch64ExpandPseudoPass(*PR);
initializeAArch64LoadStoreOptPass(*PR);
- initializeAArch64VectorByElementOptPass(*PR);
+ initializeAArch64SIMDInstrOptPass(*PR);
initializeAArch64PromoteConstantPass(*PR);
initializeAArch64RedundantCopyEliminationPass(*PR);
initializeAArch64StorePairSuppressPass(*PR);
@@ -473,7 +473,7 @@ bool AArch64PassConfig::addILPOpts() {
addPass(&EarlyIfConverterID);
if (EnableStPairSuppress)
addPass(createAArch64StorePairSuppressPass());
- addPass(createAArch64VectorByElementOptPass());
+ addPass(createAArch64SIMDInstrOptPass());
return true;
}
diff --git a/lib/Target/AArch64/AArch64VectorByElementOpt.cpp b/lib/Target/AArch64/AArch64VectorByElementOpt.cpp
index 7ea2fc88f4d..3ff4155849a 100644
--- a/lib/Target/AArch64/AArch64VectorByElementOpt.cpp
+++ b/lib/Target/AArch64/AArch64VectorByElementOpt.cpp
@@ -1,4 +1,3 @@
-//=- AArch64VectorByElementOpt.cpp - AArch64 vector by element inst opt pass =//
//
// The LLVM Compiler Infrastructure
//
@@ -7,19 +6,27 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains a pass that performs optimization for vector by element
-// SIMD instructions.
-//
-// Certain SIMD instructions with vector element operand are not efficient.
-// Rewrite them into SIMD instructions with vector operands. This rewrite
-// is driven by the latency of the instructions.
+// This file contains a pass that performs optimization on SIMD instructions
+// with high latency by splitting them into more efficient series of
+// instructions.
//
+// 1. Rewrite certain SIMD instructions with vector element due to their
+// inefficiency on some targets.
// Example:
// fmla v0.4s, v1.4s, v2.s[1]
// is rewritten into
// dup v3.4s, v2.s[1]
// fmla v0.4s, v1.4s, v3.4s
//
+// 2. Rewrite Interleaved memory access instructions due to their
+// inefficiency on some targets.
+// Example:
+// st2 {v0.4s, v1.4s}, addr
+// is rewritten into
+// zip1 v2.4s, v0.4s, v1.4s
+// zip2 v3.4s, v0.4s, v1.4s
+// stp q2, q3, addr
+//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
@@ -39,51 +46,128 @@
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
-#include <map>
+#include <unordered_map>
using namespace llvm;
-#define DEBUG_TYPE "aarch64-vectorbyelement-opt"
+#define DEBUG_TYPE "aarch64-simdinstr-opt"
STATISTIC(NumModifiedInstr,
- "Number of vector by element instructions modified");
+ "Number of SIMD instructions modified");
#define AARCH64_VECTOR_BY_ELEMENT_OPT_NAME \
- "AArch64 vector by element instruction optimization pass"
+ "AArch64 SIMD instructions optimization pass"
namespace {
-struct AArch64VectorByElementOpt : public MachineFunctionPass {
+struct AArch64SIMDInstrOpt : public MachineFunctionPass {
static char ID;
const TargetInstrInfo *TII;
MachineRegisterInfo *MRI;
TargetSchedModel SchedModel;
- AArch64VectorByElementOpt() : MachineFunctionPass(ID) {
- initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry());
+ // The two maps below are used to cache decisions instead of recomputing:
+ // This is used to cache instruction replacement decisions within function
+ // units and across function units.
+ std::map<std::pair<unsigned, std::string>, bool> SIMDInstrTable;
+ // This is used to cache the decision of whether to leave the Interleave-Store
+ // instructions replacement pass early or not for a particular target.
+ std::unordered_map<std::string, bool> InterlEarlyExit;
+
+ typedef enum {
+ VectorElem,
+ Interleave
+ } Subpass;
+
+ // Instruction represented by OrigOpc is replaced by instructions in ReplOpc.
+ struct InstReplInfo {
+ unsigned OrigOpc;
+ std::vector<unsigned> ReplOpc;
+ const TargetRegisterClass RC;
+ };
+
+#define RuleST2(OpcOrg, OpcR0, OpcR1, OpcR2, RC) \
+ {OpcOrg, {OpcR0, OpcR1, OpcR2}, RC}
+#define RuleST4(OpcOrg, OpcR0, OpcR1, OpcR2, OpcR3, OpcR4, OpcR5, OpcR6, \
+ OpcR7, OpcR8, OpcR9, RC) \
+ {OpcOrg, {OpcR0, OpcR1, OpcR2, OpcR3, OpcR4, OpcR5, OpcR6, OpcR7, \
+ OpcR8, OpcR9}, RC}
+
+ // The Instruction Replacement Table:
+ std::vector<InstReplInfo> IRT = {
+ // ST2 instructions
+ RuleST2(AArch64::ST2Twov2d, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
+ AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST2(AArch64::ST2Twov4s, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
+ AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST2(AArch64::ST2Twov2s, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
+ AArch64::STPDi, AArch64::FPR64RegClass),
+ RuleST2(AArch64::ST2Twov8h, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
+ AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST2(AArch64::ST2Twov4h, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
+ AArch64::STPDi, AArch64::FPR64RegClass),
+ RuleST2(AArch64::ST2Twov16b, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
+ AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST2(AArch64::ST2Twov8b, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
+ AArch64::STPDi, AArch64::FPR64RegClass),
+ // ST4 instructions
+ RuleST4(AArch64::ST4Fourv2d, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
+ AArch64::ZIP1v2i64, AArch64::ZIP2v2i64, AArch64::ZIP1v2i64,
+ AArch64::ZIP2v2i64, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
+ AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST4(AArch64::ST4Fourv4s, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
+ AArch64::ZIP1v4i32, AArch64::ZIP2v4i32, AArch64::ZIP1v4i32,
+ AArch64::ZIP2v4i32, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
+ AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST4(AArch64::ST4Fourv2s, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
+ AArch64::ZIP1v2i32, AArch64::ZIP2v2i32, AArch64::ZIP1v2i32,
+ AArch64::ZIP2v2i32, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
+ AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass),
+ RuleST4(AArch64::ST4Fourv8h, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
+ AArch64::ZIP1v8i16, AArch64::ZIP2v8i16, AArch64::ZIP1v8i16,
+ AArch64::ZIP2v8i16, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
+ AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST4(AArch64::ST4Fourv4h, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
+ AArch64::ZIP1v4i16, AArch64::ZIP2v4i16, AArch64::ZIP1v4i16,
+ AArch64::ZIP2v4i16, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
+ AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass),
+ RuleST4(AArch64::ST4Fourv16b, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
+ AArch64::ZIP1v16i8, AArch64::ZIP2v16i8, AArch64::ZIP1v16i8,
+ AArch64::ZIP2v16i8, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
+ AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
+ RuleST4(AArch64::ST4Fourv8b, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
+ AArch64::ZIP1v8i8, AArch64::ZIP2v8i8, AArch64::ZIP1v8i8,
+ AArch64::ZIP2v8i8, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
+ AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass)
+ };
+
+ // A costly instruction is replaced in this work by N efficient instructions
+ // The maximum of N is currently 10 and it is for the ST4 case.
+ static const unsigned MaxNumRepl = 10;
+
+ AArch64SIMDInstrOpt() : MachineFunctionPass(ID) {
+ initializeAArch64SIMDInstrOptPass(*PassRegistry::getPassRegistry());
}
/// Based only on latency of instructions, determine if it is cost efficient
- /// to replace the instruction InstDesc by the two instructions InstDescRep1
- /// and InstDescRep2.
- /// Return true if replacement is recommended.
- bool
- shouldReplaceInstruction(MachineFunction *MF, const MCInstrDesc *InstDesc,
- const MCInstrDesc *InstDescRep1,
- const MCInstrDesc *InstDescRep2,
- std::map<unsigned, bool> &VecInstElemTable) const;
-
- /// Determine if we need to exit the vector by element instruction
- /// optimization pass early. This makes sure that Targets with no need
- /// for this optimization do not spent any compile time on this pass.
- /// This check is done by comparing the latency of an indexed FMLA
- /// instruction to the latency of the DUP + the latency of a vector
- /// FMLA instruction. We do not check on other related instructions such
- /// as FMLS as we assume that if the situation shows up for one
- /// instruction, then it is likely to show up for the related ones.
- /// Return true if early exit of the pass is recommended.
- bool earlyExitVectElement(MachineFunction *MF);
+ /// to replace the instruction InstDesc by the instructions stored in the
+ /// array InstDescRepl.
+ /// Return true if replacement is expected to be faster.
+ bool shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
+ SmallVectorImpl<const MCInstrDesc*> &ReplInstrMCID);
+
+ /// Determine if we need to exit the instruction replacement optimization
+ /// subpasses early. This makes sure that Targets with no need for this
+ /// optimization do not spend any compile time on this subpass other than the
+ /// simple check performed here. This simple check is done by comparing the
+ /// latency of the original instruction to the latency of the replacement
+ /// instructions. We only check for a representative instruction in the class
+ /// of instructions and not all concerned instructions. For the VectorElem
+ /// subpass, we check for the FMLA instruction while for the interleave subpass
+ /// we check for the st2.4s instruction.
+ /// Return true if early exit of the subpass is recommended.
+ bool shouldExitEarly(MachineFunction *MF, Subpass SP);
/// Check whether an equivalent DUP instruction has already been
/// created or not.
@@ -96,8 +180,24 @@ struct AArch64VectorByElementOpt : public MachineFunctionPass {
/// Rewrite them into SIMD instructions with vector operands. This rewrite
/// is driven by the latency of the instructions.
/// Return true if the SIMD instruction is modified.
- bool optimizeVectElement(MachineInstr &MI,
- std::map<unsigned, bool> *VecInstElemTable) const;
+ bool optimizeVectElement(MachineInstr &MI);
+
+ /// Process The REG_SEQUENCE instruction, and extract the source
+ /// operands of the st2/4 instruction from it.
+ /// Example of such instructions.
+ /// %dest = REG_SEQUENCE %st2_src1, dsub0, %st2_src2, dsub1;
+ /// Return true when the instruction is processed successfully.
+ bool processSeqRegInst(MachineInstr *DefiningMI, unsigned* StReg,
+ unsigned* StRegKill, unsigned NumArg) const;
+
+ /// Load/Store Interleaving instructions are not always beneficial.
+ /// Replace them by zip instructions and classical load/store.
+ /// Return true if the SIMD instruction is modified.
+ bool optimizeLdStInterleave(MachineInstr &MI);
+
+ /// Return the number of useful source registers for this
+ /// instruction (2 for st2 and 4 for st4).
+ unsigned determineSrcReg(MachineInstr &MI) const;
bool runOnMachineFunction(MachineFunction &Fn) override;
@@ -106,84 +206,119 @@ struct AArch64VectorByElementOpt : public MachineFunctionPass {
}
};
-char AArch64VectorByElementOpt::ID = 0;
+char AArch64SIMDInstrOpt::ID = 0;
} // end anonymous namespace
-INITIALIZE_PASS(AArch64VectorByElementOpt, "aarch64-vectorbyelement-opt",
+INITIALIZE_PASS(AArch64SIMDInstrOpt, "aarch64-simdinstr-opt",
AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)
/// Based only on latency of instructions, determine if it is cost efficient
-/// to replace the instruction InstDesc by the two instructions InstDescRep1
-/// and InstDescRep2. Note that it is assumed in this fuction that an
-/// instruction of type InstDesc is always replaced by the same two
-/// instructions as results are cached here.
-/// Return true if replacement is recommended.
-bool AArch64VectorByElementOpt::shouldReplaceInstruction(
- MachineFunction *MF, const MCInstrDesc *InstDesc,
- const MCInstrDesc *InstDescRep1, const MCInstrDesc *InstDescRep2,
- std::map<unsigned, bool> &VecInstElemTable) const {
- // Check if replacment decision is alredy available in the cached table.
+/// to replace the instruction InstDesc by the instructions stored in the
+/// array InstDescRepl.
+/// Return true if replacement is expected to be faster.
+bool AArch64SIMDInstrOpt::
+shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
+ SmallVectorImpl<const MCInstrDesc*> &InstDescRepl) {
+ // Check if replacement decision is already available in the cached table.
// if so, return it.
- if (!VecInstElemTable.empty() &&
- VecInstElemTable.find(InstDesc->getOpcode()) != VecInstElemTable.end())
- return VecInstElemTable[InstDesc->getOpcode()];
+ std::string Subtarget = SchedModel.getSubtargetInfo()->getCPU();
+ std::pair<unsigned, std::string> InstID = std::make_pair(InstDesc->getOpcode(), Subtarget);
+ if (!SIMDInstrTable.empty() &&
+ SIMDInstrTable.find(InstID) != SIMDInstrTable.end())
+ return SIMDInstrTable[InstID];
unsigned SCIdx = InstDesc->getSchedClass();
- unsigned SCIdxRep1 = InstDescRep1->getSchedClass();
- unsigned SCIdxRep2 = InstDescRep2->getSchedClass();
const MCSchedClassDesc *SCDesc =
- SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
- const MCSchedClassDesc *SCDescRep1 =
- SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep1);
- const MCSchedClassDesc *SCDescRep2 =
- SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep2);
+ SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
- // If a subtarget does not define resources for any of the instructions
+ // If a subtarget does not define resources for the instructions
// of interest, then return false for no replacement.
- if (!SCDesc->isValid() || SCDesc->isVariant() || !SCDescRep1->isValid() ||
- SCDescRep1->isVariant() || !SCDescRep2->isValid() ||
- SCDescRep2->isVariant()) {
- VecInstElemTable[InstDesc->getOpcode()] = false;
+ const MCSchedClassDesc *SCDescRepl;
+ if (!SCDesc->isValid() || SCDesc->isVariant())
+ {
+ SIMDInstrTable[InstID] = false;
return false;
}
+ for (auto IDesc : InstDescRepl)
+ {
+ SCDescRepl = SchedModel.getMCSchedModel()->getSchedClassDesc(
+ IDesc->getSchedClass());
+ if (!SCDescRepl->isValid() || SCDescRepl->isVariant())
+ {
+ SIMDInstrTable[InstID] = false;
+ return false;
+ }
+ }
+
+ // Replacement cost.
+ unsigned ReplCost = 0;
+ for (auto IDesc :InstDescRepl)
+ ReplCost += SchedModel.computeInstrLatency(IDesc->getOpcode());
- if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) >
- SchedModel.computeInstrLatency(InstDescRep1->getOpcode()) +
- SchedModel.computeInstrLatency(InstDescRep2->getOpcode())) {
- VecInstElemTable[InstDesc->getOpcode()] = true;
+ if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost)
+ {
+ SIMDInstrTable[InstID] = true;
return true;
}
- VecInstElemTable[InstDesc->getOpcode()] = false;
- return false;
+ else
+ {
+ SIMDInstrTable[InstID] = false;
+ return false;
+ }
}
-/// Determine if we need to exit the vector by element instruction
-/// optimization pass early. This makes sure that Targets with no need
-/// for this optimization do not spent any compile time on this pass.
-/// This check is done by comparing the latency of an indexed FMLA
-/// instruction to the latency of the DUP + the latency of a vector
-/// FMLA instruction. We do not check on other related instructions such
-/// as FMLS as we assume that if the situation shows up for one
-/// instruction, then it is likely to show up for the related ones.
-/// Return true if early exit of the pass is recommended.
-bool AArch64VectorByElementOpt::earlyExitVectElement(MachineFunction *MF) {
- std::map<unsigned, bool> VecInstElemTable;
- const MCInstrDesc *IndexMulMCID = &TII->get(AArch64::FMLAv4i32_indexed);
- const MCInstrDesc *DupMCID = &TII->get(AArch64::DUPv4i32lane);
- const MCInstrDesc *MulMCID = &TII->get(AArch64::FMULv4f32);
-
- if (!shouldReplaceInstruction(MF, IndexMulMCID, DupMCID, MulMCID,
- VecInstElemTable))
- return true;
- return false;
+/// Determine if we need to exit the instruction replacement optimization
+/// subpasses early. This makes sure that Targets with no need for this
+/// optimization do not spend any compile time on this subpass other than the
+/// simple check performed here. This simple check is done by comparing the
+/// latency of the original instruction to the latency of the replacement
+/// instructions. We only check for a representative instruction in the class of
+/// instructions and not all concerned instructions. For the VectorElem subpass,
+/// we check for the FMLA instruction while for the interleave subpass we check
+/// for the st2.4s instruction.
+/// Return true if early exit of the subpass is recommended.
+bool AArch64SIMDInstrOpt::shouldExitEarly(MachineFunction *MF, Subpass SP) {
+ const MCInstrDesc* OriginalMCID;
+ SmallVector<const MCInstrDesc*, MaxNumRepl> ReplInstrMCID;
+
+ switch (SP) {
+ case VectorElem:
+ OriginalMCID = &TII->get(AArch64::FMLAv4i32_indexed);
+ ReplInstrMCID.push_back(&TII->get(AArch64::DUPv4i32lane));
+ ReplInstrMCID.push_back(&TII->get(AArch64::FMULv4f32));
+ if (shouldReplaceInst(MF, OriginalMCID, ReplInstrMCID))
+ return false;
+ break;
+ case Interleave:
+ // Check if early exit decision is already available in the cached
+ // table or not.
+ std::string Subtarget = SchedModel.getSubtargetInfo()->getCPU();
+ if (InterlEarlyExit.find(Subtarget) != InterlEarlyExit.end())
+ return InterlEarlyExit[Subtarget];
+
+ for (auto &I : IRT) {
+ OriginalMCID = &TII->get(I.OrigOpc);
+ for (auto &Repl : I.ReplOpc)
+ ReplInstrMCID.push_back(&TII->get(Repl));
+ if (shouldReplaceInst(MF, OriginalMCID, ReplInstrMCID)) {
+ InterlEarlyExit[Subtarget] = false;
+ return false;
+ }
+ ReplInstrMCID.clear();
+ }
+ InterlEarlyExit[Subtarget] = true;
+ break;
+ }
+
+ return true;
}
/// Check whether an equivalent DUP instruction has already been
/// created or not.
/// Return true when the dup instruction already exists. In this case,
/// DestReg will point to the destination of the already created DUP.
-bool AArch64VectorByElementOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
+bool AArch64SIMDInstrOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
unsigned SrcReg, unsigned LaneNumber,
unsigned *DestReg) const {
for (MachineBasicBlock::iterator MII = MI, MIE = MI.getParent()->begin();
@@ -215,8 +350,7 @@ bool AArch64VectorByElementOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
/// dup v3.4s, v2.s[1] // dup not necessary if redundant
/// fmla v0.4s, v1.4s, v3.4s
/// Return true if the SIMD instruction is modified.
-bool AArch64VectorByElementOpt::optimizeVectElement(
- MachineInstr &MI, std::map<unsigned, bool> *VecInstElemTable) const {
+bool AArch64SIMDInstrOpt::optimizeVectElement(MachineInstr &MI) {
const MCInstrDesc *MulMCID, *DupMCID;
const TargetRegisterClass *RC = &AArch64::FPR128RegClass;
@@ -283,9 +417,11 @@ bool AArch64VectorByElementOpt::optimizeVectElement(
break;
}
- if (!shouldReplaceInstruction(MI.getParent()->getParent(),
- &TII->get(MI.getOpcode()), DupMCID, MulMCID,
- *VecInstElemTable))
+ SmallVector<const MCInstrDesc*, 2> ReplInstrMCID;
+ ReplInstrMCID.push_back(DupMCID);
+ ReplInstrMCID.push_back(MulMCID);
+ if (!shouldReplaceInst(MI.getParent()->getParent(), &TII->get(MI.getOpcode()),
+ ReplInstrMCID))
return false;
const DebugLoc &DL = MI.getDebugLoc();
@@ -305,7 +441,6 @@ bool AArch64VectorByElementOpt::optimizeVectElement(
unsigned SrcReg2 = MI.getOperand(3).getReg();
unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
unsigned LaneNumber = MI.getOperand(4).getImm();
-
// Create a new DUP instruction. Note that if an equivalent DUP instruction
// has already been created before, then use that one instead of creating
// a new one.
@@ -338,7 +473,217 @@ bool AArch64VectorByElementOpt::optimizeVectElement(
return true;
}
-bool AArch64VectorByElementOpt::runOnMachineFunction(MachineFunction &MF) {
+/// Load/Store Interleaving instructions are not always beneficial.
+/// Replace them by zip instructions and classical load/store.
+///
+/// Example:
+/// st2 {v0.4s, v1.4s}, addr
+/// is rewritten into
+/// zip1 v2.4s, v0.4s, v1.4s
+/// zip2 v3.4s, v0.4s, v1.4s
+/// stp q2, q3, addr
+//
+/// Example:
+/// st4 {v0.4s, v1.4s, v2.4s, v3.4s}, addr
+/// is rewritten into
+/// zip1 v4.4s, v0.4s, v2.4s
+/// zip2 v5.4s, v0.4s, v2.4s
+/// zip1 v6.4s, v1.4s, v3.4s
+/// zip2 v7.4s, v1.4s, v3.4s
+/// zip1 v8.4s, v4.4s, v6.4s
+/// zip2 v9.4s, v4.4s, v6.4s
+/// zip1 v10.4s, v5.4s, v7.4s
+/// zip2 v11.4s, v5.4s, v7.4s
+/// stp q8, q9, addr
+/// stp q10, q11, addr+32
+/// Currently only instructions related to st2 and st4 are considered.
+/// Others may be added later.
+/// Return true if the SIMD instruction is modified.
+bool AArch64SIMDInstrOpt::optimizeLdStInterleave(MachineInstr &MI) {
+
+ unsigned SeqReg, AddrReg;
+ unsigned StReg[4], StRegKill[4];
+ MachineInstr *DefiningMI;
+ const DebugLoc &DL = MI.getDebugLoc();
+ MachineBasicBlock &MBB = *MI.getParent();
+ SmallVector<unsigned, MaxNumRepl> ZipDest;
+ SmallVector<const MCInstrDesc*, MaxNumRepl> ReplInstrMCID;
+
+ // If current instruction matches any of the rewriting rules, then
+ // gather information about parameters of the new instructions.
+ bool Match = false;
+ for (auto &I : IRT) {
+ if (MI.getOpcode() == I.OrigOpc) {
+ SeqReg = MI.getOperand(0).getReg();
+ AddrReg = MI.getOperand(1).getReg();
+ DefiningMI = MRI->getUniqueVRegDef(SeqReg);
+ unsigned NumReg = determineSrcReg(MI);
+ if (!processSeqRegInst(DefiningMI, StReg, StRegKill, NumReg))
+ return false;
+
+ for (auto &Repl : I.ReplOpc) {
+ ReplInstrMCID.push_back(&TII->get(Repl));
+ // Generate destination registers but only for non-store instruction.
+ if (Repl != AArch64::STPQi && Repl != AArch64::STPDi)
+ ZipDest.push_back(MRI->createVirtualRegister(&I.RC));
+ }
+ Match = true;
+ break;
+ }
+ }
+
+ if (!Match)
+ return false;
+
+ // Determine if it is profitable to replace MI by the series of instructions
+ // represented in ReplInstrMCID.
+ if (!shouldReplaceInst(MI.getParent()->getParent(), &TII->get(MI.getOpcode()),
+ ReplInstrMCID))
+ return false;
+
+ // Generate the replacement instructions composed of zip1, zip2, and stp (at
+ // this point, the code generation is hardcoded and does not rely on the IRT
+ // table used above given that code generation for ST2 replacement is somewhat
+ // different than for ST4 replacement. We could have added more info into the
+ // table related to how we build new instructions but we may be adding more
+ // complexity with that).
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case AArch64::ST2Twov16b:
+ case AArch64::ST2Twov8b:
+ case AArch64::ST2Twov8h:
+ case AArch64::ST2Twov4h:
+ case AArch64::ST2Twov4s:
+ case AArch64::ST2Twov2s:
+ case AArch64::ST2Twov2d:
+ // zip instructions
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[0], ZipDest[0])
+ .addReg(StReg[0])
+ .addReg(StReg[1]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[1], ZipDest[1])
+ .addReg(StReg[0], StRegKill[0])
+ .addReg(StReg[1], StRegKill[1]);
+ // stp instructions
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[2])
+ .addReg(ZipDest[0])
+ .addReg(ZipDest[1])
+ .addReg(AddrReg)
+ .addImm(0);
+ break;
+ case AArch64::ST4Fourv16b:
+ case AArch64::ST4Fourv8b:
+ case AArch64::ST4Fourv8h:
+ case AArch64::ST4Fourv4h:
+ case AArch64::ST4Fourv4s:
+ case AArch64::ST4Fourv2s:
+ case AArch64::ST4Fourv2d:
+ // zip instructions
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[0], ZipDest[0])
+ .addReg(StReg[0])
+ .addReg(StReg[2]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[1], ZipDest[1])
+ .addReg(StReg[0], StRegKill[0])
+ .addReg(StReg[2], StRegKill[2]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[2], ZipDest[2])
+ .addReg(StReg[1])
+ .addReg(StReg[3]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[3], ZipDest[3])
+ .addReg(StReg[1], StRegKill[1])
+ .addReg(StReg[3], StRegKill[3]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[4], ZipDest[4])
+ .addReg(ZipDest[0])
+ .addReg(ZipDest[2]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[5], ZipDest[5])
+ .addReg(ZipDest[0])
+ .addReg(ZipDest[2]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[6], ZipDest[6])
+ .addReg(ZipDest[1])
+ .addReg(ZipDest[3]);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[7], ZipDest[7])
+ .addReg(ZipDest[1])
+ .addReg(ZipDest[3]);
+ // stp instructions
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[8])
+ .addReg(ZipDest[4])
+ .addReg(ZipDest[5])
+ .addReg(AddrReg)
+ .addImm(0);
+ BuildMI(MBB, MI, DL, *ReplInstrMCID[9])
+ .addReg(ZipDest[6])
+ .addReg(ZipDest[7])
+ .addReg(AddrReg)
+ .addImm(2);
+ break;
+ }
+
+ ++NumModifiedInstr;
+ return true;
+}
+
+/// Process The REG_SEQUENCE instruction, and extract the source
+/// operands of the st2/4 instruction from it.
+/// Example of such instruction.
+/// %dest = REG_SEQUENCE %st2_src1, dsub0, %st2_src2, dsub1;
+/// Return true when the instruction is processed successfully.
+bool AArch64SIMDInstrOpt::processSeqRegInst(MachineInstr *DefiningMI,
+ unsigned* StReg, unsigned* StRegKill, unsigned NumArg) const {
+ assert (DefiningMI != NULL);
+ if (DefiningMI->getOpcode() != AArch64::REG_SEQUENCE)
+ return false;
+
+ for (unsigned i=0; i<NumArg; i++) {
+ StReg[i] = DefiningMI->getOperand(2*i+1).getReg();
+ StRegKill[i] = getKillRegState(DefiningMI->getOperand(2*i+1).isKill());
+
+ // Sanity check for the other arguments.
+ if (DefiningMI->getOperand(2*i+2).isImm()) {
+ switch (DefiningMI->getOperand(2*i+2).getImm()) {
+ default:
+ return false;
+ case AArch64::dsub0:
+ case AArch64::dsub1:
+ case AArch64::dsub2:
+ case AArch64::dsub3:
+ case AArch64::qsub0:
+ case AArch64::qsub1:
+ case AArch64::qsub2:
+ case AArch64::qsub3:
+ break;
+ }
+ }
+ else
+ return false;
+ }
+ return true;
+}
+
+/// Return the number of useful source registers for this instruction
+/// (2 for ST2 and 4 for ST4).
+unsigned AArch64SIMDInstrOpt::determineSrcReg(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unsupported instruction for this pass");
+ case AArch64::ST2Twov16b:
+ case AArch64::ST2Twov8b:
+ case AArch64::ST2Twov8h:
+ case AArch64::ST2Twov4h:
+ case AArch64::ST2Twov4s:
+ case AArch64::ST2Twov2s:
+ case AArch64::ST2Twov2d:
+ return 2;
+ case AArch64::ST4Fourv16b:
+ case AArch64::ST4Fourv8b:
+ case AArch64::ST4Fourv8h:
+ case AArch64::ST4Fourv4h:
+ case AArch64::ST4Fourv4s:
+ case AArch64::ST4Fourv2s:
+ case AArch64::ST4Fourv2d:
+ return 4;
+ }
+}
+
+bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(*MF.getFunction()))
return false;
@@ -353,36 +698,38 @@ bool AArch64VectorByElementOpt::runOnMachineFunction(MachineFunction &MF) {
if (!SchedModel.hasInstrSchedModel())
return false;
- // A simple check to exit this pass early for targets that do not need it.
- if (earlyExitVectElement(&MF))
- return false;
-
bool Changed = false;
- std::map<unsigned, bool> VecInstElemTable;
- SmallVector<MachineInstr *, 8> RemoveMIs;
-
- for (MachineBasicBlock &MBB : MF) {
- for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
- MII != MIE;) {
- MachineInstr &MI = *MII;
- if (optimizeVectElement(MI, &VecInstElemTable)) {
- // Add MI to the list of instructions to be removed given that it has
- // been replaced.
- RemoveMIs.push_back(&MI);
- Changed = true;
+ for (auto OptimizationKind : {VectorElem, Interleave}) {
+ if (!shouldExitEarly(&MF, OptimizationKind)) {
+ SmallVector<MachineInstr *, 8> RemoveMIs;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
+ MII != MIE;) {
+ MachineInstr &MI = *MII;
+ bool InstRewrite;
+ if (OptimizationKind == VectorElem)
+ InstRewrite = optimizeVectElement(MI) ;
+ else
+ InstRewrite = optimizeLdStInterleave(MI);
+ if (InstRewrite) {
+ // Add MI to the list of instructions to be removed given that it
+ // has been replaced.
+ RemoveMIs.push_back(&MI);
+ Changed = true;
+ }
+ ++MII;
+ }
}
- ++MII;
+ for (MachineInstr *MI : RemoveMIs)
+ MI->eraseFromParent();
}
}
- for (MachineInstr *MI : RemoveMIs)
- MI->eraseFromParent();
-
return Changed;
}
-/// createAArch64VectorByElementOptPass - returns an instance of the
+/// createAArch64SIMDInstrOptPass - returns an instance of the
/// vector by element optimization pass.
-FunctionPass *llvm::createAArch64VectorByElementOptPass() {
- return new AArch64VectorByElementOpt();
+FunctionPass *llvm::createAArch64SIMDInstrOptPass() {
+ return new AArch64SIMDInstrOpt();
}