author     Francis Visoiu Mistrih <francisvm@yahoo.com>  2017-11-30 16:12:24 +0000
committer  Francis Visoiu Mistrih <francisvm@yahoo.com>  2017-11-30 16:12:24 +0000
commit     e6b89910eb5c0a89e5bbdd8ceb3b6394efe6dabc (patch)
tree       2888ae660f4d6f45df7a663e14a0187a37679326
parent     4a8c2b625b7ed7d95e349cdd45ff6a3df0771bc5 (diff)
[CodeGen] Always use `printReg` to print registers in both MIR and debug output

As part of the unification of the debug format and the MIR format, always use `printReg` to print all kinds of registers.

Updated the tests using '_' instead of '%noreg' until we decide which one we want to be the default one.

Differential Revision: https://reviews.llvm.org/D40421

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319445 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/CodeGen/AggressiveAntiDepBreaker.cpp  49
-rw-r--r--  lib/CodeGen/CriticalAntiDepBreaker.cpp  8
-rw-r--r--  lib/CodeGen/ExecutionDepsFix.cpp  2
-rw-r--r--  lib/CodeGen/MIRPrinter.cpp  25
-rw-r--r--  lib/CodeGen/MachineVerifier.cpp  10
-rw-r--r--  lib/CodeGen/RegAllocBasic.cpp  4
-rw-r--r--  lib/CodeGen/RegAllocFast.cpp  2
-rw-r--r--  lib/CodeGen/RegUsageInfoCollector.cpp  2
-rw-r--r--  lib/CodeGen/RegisterScavenging.cpp  12
-rw-r--r--  lib/CodeGen/RegisterUsageInfo.cpp  2
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp  10
-rw-r--r--  lib/CodeGen/StackMaps.cpp  8
-rw-r--r--  lib/CodeGen/TargetRegisterInfo.cpp  7
-rw-r--r--  lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp  16
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.cpp  10
-rw-r--r--  lib/Target/BPF/BPFISelDAGToDAG.cpp  3
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/debug-insts.ll  10
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir  8
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir  8
-rw-r--r--  test/CodeGen/AArch64/machine-outliner-remarks.ll  2
-rw-r--r--  test/CodeGen/AMDGPU/fadd.ll  2
-rw-r--r--  test/CodeGen/AMDGPU/inserted-wait-states.mir  2
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll  2
-rw-r--r--  test/CodeGen/AMDGPU/regcoalesce-dbg.mir  2
-rw-r--r--  test/CodeGen/ARM/ARMLoadStoreDBG.mir  40
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll  8
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir  484
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir  80
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir  232
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll  48
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir  24
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir  88
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalizer.mir  70
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll  78
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir  70
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir  24
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir  40
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir  16
-rw-r--r--  test/CodeGen/ARM/a15-SD-dep.ll  2
-rw-r--r--  test/CodeGen/ARM/cmp1-peephole-thumb.mir  16
-rw-r--r--  test/CodeGen/ARM/cmp2-peephole-thumb.mir  22
-rw-r--r--  test/CodeGen/ARM/constant-islands-cfg.mir  8
-rw-r--r--  test/CodeGen/ARM/dbg-range-extension.mir  90
-rw-r--r--  test/CodeGen/ARM/expand-pseudos.mir  22
-rw-r--r--  test/CodeGen/ARM/fpoffset_overflow.mir  14
-rw-r--r--  test/CodeGen/ARM/imm-peephole-arm.mir  10
-rw-r--r--  test/CodeGen/ARM/imm-peephole-thumb.mir  10
-rw-r--r--  test/CodeGen/ARM/indirect-hidden.ll  2
-rw-r--r--  test/CodeGen/ARM/litpool-licm.ll  2
-rw-r--r--  test/CodeGen/ARM/load_store_opt_kill.mir  4
-rw-r--r--  test/CodeGen/ARM/local-call.ll  2
-rw-r--r--  test/CodeGen/ARM/machine-copyprop.mir  12
-rw-r--r--  test/CodeGen/ARM/misched-int-basic-thumb2.mir  32
-rw-r--r--  test/CodeGen/ARM/misched-int-basic.mir  22
-rw-r--r--  test/CodeGen/ARM/pei-swiftself.mir  2
-rw-r--r--  test/CodeGen/ARM/pr25317.ll  2
-rw-r--r--  test/CodeGen/ARM/preferred-align.ll  2
-rw-r--r--  test/CodeGen/ARM/prera-ldst-aliasing.mir  10
-rw-r--r--  test/CodeGen/ARM/prera-ldst-insertpt.mir  56
-rw-r--r--  test/CodeGen/ARM/scavenging.mir  32
-rw-r--r--  test/CodeGen/ARM/sched-it-debug-nodes.mir  36
-rw-r--r--  test/CodeGen/ARM/single-issue-r52.mir  8
-rw-r--r--  test/CodeGen/ARM/tail-dup-bundle.mir  6
-rw-r--r--  test/CodeGen/ARM/thumb-litpool.ll  2
-rw-r--r--  test/CodeGen/ARM/v6-jumptable-clobber.mir  32
-rw-r--r--  test/CodeGen/ARM/vcvt_combine.ll  2
-rw-r--r--  test/CodeGen/ARM/vdiv_combine.ll  2
-rw-r--r--  test/CodeGen/ARM/virtregrewriter-subregliveness.mir  6
-rw-r--r--  test/CodeGen/ARM/vldm-liveness.mir  16
-rw-r--r--  test/CodeGen/Hexagon/duplex.ll  2
-rw-r--r--  test/CodeGen/Hexagon/early-if-debug.mir  20
-rw-r--r--  test/CodeGen/MIR/ARM/bundled-instructions.mir  20
-rw-r--r--  test/CodeGen/MIR/ARM/ifcvt_diamond_unanalyzable.mir  4
-rw-r--r--  test/CodeGen/MIR/ARM/ifcvt_forked_diamond_unanalyzable.mir  8
-rw-r--r--  test/CodeGen/MIR/ARM/ifcvt_simple_unanalyzable.mir  2
-rw-r--r--  test/CodeGen/MIR/ARM/ifcvt_triangleWoCvtToNextEdge.mir  4
-rw-r--r--  test/CodeGen/MIR/X86/block-address-operands.mir  10
-rw-r--r--  test/CodeGen/MIR/X86/constant-pool.mir  12
-rw-r--r--  test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir  2
-rw-r--r--  test/CodeGen/MIR/X86/global-value-operands.mir  6
-rw-r--r--  test/CodeGen/MIR/X86/instructions-debug-location.mir  8
-rw-r--r--  test/CodeGen/MIR/X86/jump-table-info.mir  4
-rw-r--r--  test/CodeGen/MIR/X86/memory-operands.mir  68
-rw-r--r--  test/CodeGen/MIR/X86/metadata-operands.mir  2
-rw-r--r--  test/CodeGen/MIR/X86/null-register-operands.mir  2
-rw-r--r--  test/CodeGen/MIR/X86/roundtrip.mir  2
-rw-r--r--  test/CodeGen/MIR/X86/stack-object-operands.mir  8
-rw-r--r--  test/CodeGen/Mips/const-mult.ll  2
-rw-r--r--  test/CodeGen/Mips/mips64signextendsesf.ll  2
-rw-r--r--  test/CodeGen/PowerPC/cxx_tlscc64.ll  2
-rw-r--r--  test/CodeGen/PowerPC/debuginfo-split-int.ll  4
-rw-r--r--  test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll  2
-rw-r--r--  test/CodeGen/SPARC/LeonItinerariesUT.ll  2
-rw-r--r--  test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir  44
-rw-r--r--  test/CodeGen/SystemZ/clear-liverange-spillreg.mir  46
-rw-r--r--  test/CodeGen/SystemZ/fp-cmp-07.mir  2
-rw-r--r--  test/CodeGen/SystemZ/fp-conv-17.mir  102
-rw-r--r--  test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir  2
-rw-r--r--  test/CodeGen/Thumb/machine-cse-physreg.mir  8
-rw-r--r--  test/CodeGen/Thumb/tbb-reuse.mir  34
-rw-r--r--  test/CodeGen/Thumb2/bicbfi.ll  2
-rw-r--r--  test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir  18
-rw-r--r--  test/CodeGen/Thumb2/tbb-removeadd.mir  26
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-GV.mir  32
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-constant.mir  2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-fconstant.mir  12
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-frameIndex.mir  6
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-gep.mir  2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir  40
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-memop-scalar.mir  58
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-memop-v128.mir  32
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-memop-v256.mir  24
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-memop-v512.mir  8
-rw-r--r--  test/CodeGen/X86/block-placement.mir  4
-rw-r--r--  test/CodeGen/X86/conditional-tailcall-samedest.mir  2
-rw-r--r--  test/CodeGen/X86/domain-reassignment.mir  44
-rw-r--r--  test/CodeGen/X86/dynamic-alloca-lifetime.ll  2
-rwxr-xr-x  test/CodeGen/X86/evex-to-vex-compress.mir  4768
-rw-r--r--  test/CodeGen/X86/expand-vr64-gr64-copy.mir  8
-rw-r--r--  test/CodeGen/X86/fcmove.ll  2
-rw-r--r--  test/CodeGen/X86/fixup-bw-inst.mir  6
-rw-r--r--  test/CodeGen/X86/i486-fence-loop.ll  2
-rw-r--r--  test/CodeGen/X86/implicit-null-checks.mir  98
-rw-r--r--  test/CodeGen/X86/implicit-use-spill.mir  4
-rw-r--r--  test/CodeGen/X86/ipra-inline-asm.ll  2
-rw-r--r--  test/CodeGen/X86/ipra-reg-alias.ll  2
-rw-r--r--  test/CodeGen/X86/ipra-reg-usage.ll  2
-rw-r--r--  test/CodeGen/X86/lea-opt-with-debug.mir  30
-rw-r--r--  test/CodeGen/X86/leaFixup32.mir  40
-rw-r--r--  test/CodeGen/X86/leaFixup64.mir  90
-rw-r--r--  test/CodeGen/X86/movtopush.mir  34
-rw-r--r--  test/CodeGen/X86/non-value-mem-operand.mir  48
-rw-r--r--  test/CodeGen/X86/peephole-recurrence.mir  2
-rw-r--r--  test/CodeGen/X86/post-ra-sched-with-debug.mir  44
-rw-r--r--  test/CodeGen/X86/pr27681.mir  6
-rw-r--r--  test/CodeGen/X86/pre-coalesce.mir  10
-rw-r--r--  test/CodeGen/X86/system-intrinsics-xgetbv.ll  2
-rw-r--r--  test/CodeGen/X86/tail-merge-after-mbp.mir  20
138 files changed, 3976 insertions, 3986 deletions
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index b8a006492d0..162e04fe4ce 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -141,7 +141,7 @@ AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
DEBUG(for (unsigned r : CriticalPathSet.set_bits())
- dbgs() << " " << TRI->getName(r));
+ dbgs() << " " << printReg(r, TRI));
DEBUG(dbgs() << '\n');
}
@@ -216,7 +216,7 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
// schedule region).
if (State->IsLive(Reg)) {
DEBUG(if (State->GetGroup(Reg) != 0)
- dbgs() << " " << TRI->getName(Reg) << "=g" <<
+ dbgs() << " " << printReg(Reg, TRI) << "=g" <<
State->GetGroup(Reg) << "->g0(region live-out)");
State->UnionGroups(Reg, 0);
} else if ((DefIndices[Reg] < InsertPosIndex)
@@ -323,7 +323,7 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
RegRefs.erase(Reg);
State->LeaveGroup(Reg);
DEBUG(if (header) {
- dbgs() << header << TRI->getName(Reg); header = nullptr; });
+ dbgs() << header << printReg(Reg, TRI); header = nullptr; });
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
// Repeat for subregisters. Note that we only do this if the superregister
// was not live because otherwise, regardless whether we have an explicit
@@ -337,8 +337,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
RegRefs.erase(SubregReg);
State->LeaveGroup(SubregReg);
DEBUG(if (header) {
- dbgs() << header << TRI->getName(Reg); header = nullptr; });
- DEBUG(dbgs() << " " << TRI->getName(SubregReg) << "->g" <<
+ dbgs() << header << printReg(Reg, TRI); header = nullptr; });
+ DEBUG(dbgs() << " " << printReg(SubregReg, TRI) << "->g" <<
State->GetGroup(SubregReg) << tag);
}
}
@@ -374,7 +374,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg));
+ DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg));
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
@@ -393,8 +393,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg)) {
State->UnionGroups(Reg, AliasReg);
- DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via " <<
- TRI->getName(AliasReg) << ")");
+ DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via "
+ << printReg(AliasReg, TRI) << ")");
}
}
@@ -469,8 +469,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" <<
- State->GetGroup(Reg));
+ DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg));
// It wasn't previously live but now it is, this is a kill. Forget
// the previous live-range information and start a new live-range
@@ -505,10 +504,10 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
if (Reg == 0) continue;
if (FirstReg != 0) {
- DEBUG(dbgs() << "=" << TRI->getName(Reg));
+ DEBUG(dbgs() << "=" << printReg(Reg, TRI));
State->UnionGroups(FirstReg, Reg);
} else {
- DEBUG(dbgs() << " " << TRI->getName(Reg));
+ DEBUG(dbgs() << " " << printReg(Reg, TRI));
FirstReg = Reg;
}
}
@@ -574,7 +573,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// If Reg has any references, then collect possible rename regs
if (RegRefs.count(Reg) > 0) {
- DEBUG(dbgs() << "\t\t" << TRI->getName(Reg) << ":");
+ DEBUG(dbgs() << "\t\t" << printReg(Reg, TRI) << ":");
BitVector &BV = RenameRegisterMap[Reg];
assert(BV.empty());
@@ -583,7 +582,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
DEBUG({
dbgs() << " ::";
for (unsigned r : BV.set_bits())
- dbgs() << " " << TRI->getName(r);
+ dbgs() << " " << printReg(r, TRI);
dbgs() << "\n";
});
}
@@ -608,8 +607,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
if (renamecnt++ % DebugDiv != DebugMod)
return false;
- dbgs() << "*** Performing rename " << TRI->getName(SuperReg) <<
- " for debug ***\n";
+ dbgs() << "*** Performing rename " << printReg(SuperReg, TRI)
+ << " for debug ***\n";
}
#endif
@@ -646,7 +645,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
- DEBUG(dbgs() << " [" << TRI->getName(NewSuperReg) << ':');
+ DEBUG(dbgs() << " [" << printReg(NewSuperReg, TRI) << ':');
RenameMap.clear();
// For each referenced group register (which must be a SuperReg or
@@ -663,7 +662,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx);
}
- DEBUG(dbgs() << " " << TRI->getName(NewReg));
+ DEBUG(dbgs() << " " << printReg(NewReg, TRI));
// Check if Reg can be renamed to NewReg.
if (!RenameRegisterMap[Reg].test(NewReg)) {
@@ -684,7 +683,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg) ||
(KillIndices[Reg] > DefIndices[AliasReg])) {
- DEBUG(dbgs() << "(alias " << TRI->getName(AliasReg) << " live)");
+ DEBUG(dbgs() << "(alias " << printReg(AliasReg, TRI) << " live)");
found = true;
break;
}
@@ -793,7 +792,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (!State->IsLive(Reg))
- DEBUG(dbgs() << " " << TRI->getName(Reg));
+ DEBUG(dbgs() << " " << printReg(Reg, TRI));
}
DEBUG(dbgs() << '\n');
#endif
@@ -849,7 +848,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
(Edge->getKind() != SDep::Output)) continue;
unsigned AntiDepReg = Edge->getReg();
- DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg));
+ DEBUG(dbgs() << "\tAntidep reg: " << printReg(AntiDepReg, TRI));
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
if (!MRI.isAllocatable(AntiDepReg)) {
@@ -952,7 +951,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
std::map<unsigned, unsigned> RenameMap;
if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) {
DEBUG(dbgs() << "\tBreaking anti-dependence edge on "
- << TRI->getName(AntiDepReg) << ":");
+ << printReg(AntiDepReg, TRI) << ":");
// Handle each group register...
for (std::map<unsigned, unsigned>::iterator
@@ -960,9 +959,9 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
unsigned CurrReg = S->first;
unsigned NewReg = S->second;
- DEBUG(dbgs() << " " << TRI->getName(CurrReg) << "->" <<
- TRI->getName(NewReg) << "(" <<
- RegRefs.count(CurrReg) << " refs)");
+ DEBUG(dbgs() << " " << printReg(CurrReg, TRI) << "->"
+ << printReg(NewReg, TRI) << "("
+ << RegRefs.count(CurrReg) << " refs)");
// Update the references to the old register CurrReg to
// refer to the new register NewReg.
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 83f08e082c3..30918a98be0 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -466,7 +466,7 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (KillIndices[Reg] == ~0u)
- DEBUG(dbgs() << " " << TRI->getName(Reg));
+ DEBUG(dbgs() << " " << printReg(Reg, TRI));
}
DEBUG(dbgs() << '\n');
}
@@ -646,9 +646,9 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
LastNewReg[AntiDepReg],
RC, ForbidRegs)) {
DEBUG(dbgs() << "Breaking anti-dependence edge on "
- << TRI->getName(AntiDepReg)
- << " with " << RegRefs.count(AntiDepReg) << " references"
- << " using " << TRI->getName(NewReg) << "!\n");
+ << printReg(AntiDepReg, TRI) << " with "
+ << RegRefs.count(AntiDepReg) << " references"
+ << " using " << printReg(NewReg, TRI) << "!\n");
// Update the references to the old register to refer to the new
// register.
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index 61990671d88..73c4b6a145d 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -394,7 +394,7 @@ void ExecutionDepsFix::processDefs(MachineInstr *MI, bool breakDependency,
continue;
for (int rx : regIndices(MO.getReg())) {
// This instruction explicitly defines rx.
- DEBUG(dbgs() << TRI->getName(RC->getRegister(rx)) << ":\t" << CurInstr
+ DEBUG(dbgs() << printReg(RC->getRegister(rx), TRI) << ":\t" << CurInstr
<< '\t' << *MI);
if (breakDependency) {
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index 989e0423abe..e437a528115 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -192,23 +192,10 @@ template <> struct BlockScalarTraits<Module> {
} // end namespace yaml
} // end namespace llvm
-static void printRegMIR(unsigned Reg, raw_ostream &OS,
- const TargetRegisterInfo *TRI) {
- // TODO: Print Stack Slots.
- if (!Reg)
- OS << '_';
- else if (TargetRegisterInfo::isVirtualRegister(Reg))
- OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
- else if (Reg < TRI->getNumRegs())
- OS << '%' << StringRef(TRI->getName(Reg)).lower();
- else
- llvm_unreachable("Can't print this kind of register yet");
-}
-
static void printRegMIR(unsigned Reg, yaml::StringValue &Dest,
const TargetRegisterInfo *TRI) {
raw_string_ostream OS(Dest.Value);
- printRegMIR(Reg, OS, TRI);
+ OS << printReg(Reg, TRI);
}
void MIRPrinter::print(const MachineFunction &MF) {
@@ -262,7 +249,7 @@ static void printCustomRegMask(const uint32_t *RegMask, raw_ostream &OS,
if (RegMask[I / 32] & (1u << (I % 32))) {
if (IsRegInRegMaskFound)
OS << ',';
- printRegMIR(I, OS, TRI);
+ OS << printReg(I, TRI);
IsRegInRegMaskFound = true;
}
}
@@ -648,7 +635,7 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
if (!First)
OS << ", ";
First = false;
- printRegMIR(LI.PhysReg, OS, &TRI);
+ OS << printReg(LI.PhysReg, &TRI);
if (!LI.LaneMask.all())
OS << ":0x" << PrintLaneMask(LI.LaneMask);
}
@@ -949,7 +936,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
OS << "early-clobber ";
if (Op.isDebug())
OS << "debug-use ";
- printRegMIR(Reg, OS, TRI);
+ OS << printReg(Reg, TRI);
// Print the sub register.
if (Op.getSubReg() != 0)
OS << '.' << TRI->getSubRegIndexName(Op.getSubReg());
@@ -1041,7 +1028,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
if (IsCommaNeeded)
OS << ", ";
- printRegMIR(Reg, OS, TRI);
+ OS << printReg(Reg, TRI);
IsCommaNeeded = true;
}
}
@@ -1212,7 +1199,7 @@ static void printCFIRegister(unsigned DwarfReg, raw_ostream &OS,
OS << "<badreg>";
return;
}
- printRegMIR(Reg, OS, TRI);
+ OS << printReg(Reg, TRI);
}
void MIPrinter::print(const MCCFIInstruction &CFI,
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index 83a9e1a58c0..2d138298a94 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -1097,8 +1097,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (!DRC->contains(Reg)) {
report("Illegal physical register for instruction", MO, MONum);
- errs() << TRI->getName(Reg) << " is not a "
- << TRI->getRegClassName(DRC) << " register.\n";
+ errs() << printReg(Reg, TRI) << " is not a "
+ << TRI->getRegClassName(DRC) << " register.\n";
}
}
}
@@ -1689,7 +1689,7 @@ void MachineVerifier::visitMachineFunctionAfter() {
if (MInfo.regsKilled.count(*I)) {
report("Virtual register killed in block, but needed live out.", &MBB);
errs() << "Virtual register " << printReg(*I)
- << " is used after the block.\n";
+ << " is used after the block.\n";
}
}
@@ -1722,13 +1722,13 @@ void MachineVerifier::verifyLiveVariables() {
if (!VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block missing from AliveBlocks", &MBB);
errs() << "Virtual register " << printReg(Reg)
- << " must be live through the block.\n";
+ << " must be live through the block.\n";
}
} else {
if (VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block should not be in AliveBlocks", &MBB);
errs() << "Virtual register " << printReg(Reg)
- << " is not needed live through the block.\n";
+ << " is not needed live through the block.\n";
}
}
}
diff --git a/lib/CodeGen/RegAllocBasic.cpp b/lib/CodeGen/RegAllocBasic.cpp
index b38373d1049..3d60f3101fc 100644
--- a/lib/CodeGen/RegAllocBasic.cpp
+++ b/lib/CodeGen/RegAllocBasic.cpp
@@ -219,8 +219,8 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
Intfs.push_back(Intf);
}
}
- DEBUG(dbgs() << "spilling " << TRI->getName(PhysReg) <<
- " interferences with " << VirtReg << "\n");
+ DEBUG(dbgs() << "spilling " << printReg(PhysReg, TRI)
+ << " interferences with " << VirtReg << "\n");
assert(!Intfs.empty() && "expected interference");
// Spill each interfering vreg allocated to PhysReg or an alias.
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 9da881005b5..f26f43d79f2 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -813,7 +813,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
void RegAllocFast::dumpState() {
for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
if (PhysRegState[Reg] == regDisabled) continue;
- dbgs() << " " << TRI->getName(Reg);
+ dbgs() << " " << printReg(Reg, TRI);
switch(PhysRegState[Reg]) {
case regFree:
break;
diff --git a/lib/CodeGen/RegUsageInfoCollector.cpp b/lib/CodeGen/RegUsageInfoCollector.cpp
index 3aaa5a4738d..2b418feb29e 100644
--- a/lib/CodeGen/RegUsageInfoCollector.cpp
+++ b/lib/CodeGen/RegUsageInfoCollector.cpp
@@ -141,7 +141,7 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) {
for (unsigned PReg = 1, PRegE = TRI->getNumRegs(); PReg < PRegE; ++PReg)
if (MachineOperand::clobbersPhysReg(&(RegMask[0]), PReg))
- DEBUG(dbgs() << TRI->getName(PReg) << " ");
+ DEBUG(dbgs() << printReg(PReg, TRI) << " ");
DEBUG(dbgs() << " \n----------------------------------------\n");
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 758a81fa5f7..5aeec854dad 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -288,8 +288,8 @@ bool RegScavenger::isRegUsed(unsigned Reg, bool includeReserved) const {
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
for (unsigned Reg : *RC) {
if (!isRegUsed(Reg)) {
- DEBUG(dbgs() << "Scavenger found unused reg: " << TRI->getName(Reg) <<
- "\n");
+ DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
+ << "\n");
return Reg;
}
}
@@ -561,15 +561,15 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// If we found an unused register there is no reason to spill it.
if (!isRegUsed(SReg)) {
- DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
+ DEBUG(dbgs() << "Scavenged register: " << printReg(SReg, TRI) << "\n");
return SReg;
}
ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
Scavenged.Restore = &*std::prev(UseMI);
- DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
- "\n");
+ DEBUG(dbgs() << "Scavenged register (with spill): " << printReg(SReg, TRI)
+ << "\n");
return SReg;
}
@@ -599,7 +599,7 @@ unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
Scavenged.Restore = &*std::prev(SpillBefore);
LiveUnits.removeReg(Reg);
DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
- << " until " << *SpillBefore);
+ << " until " << *SpillBefore);
} else {
DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI) << '\n');
}
diff --git a/lib/CodeGen/RegisterUsageInfo.cpp b/lib/CodeGen/RegisterUsageInfo.cpp
index fa74d418529..4e42deb406e 100644
--- a/lib/CodeGen/RegisterUsageInfo.cpp
+++ b/lib/CodeGen/RegisterUsageInfo.cpp
@@ -97,7 +97,7 @@ void PhysicalRegisterUsageInfo::print(raw_ostream &OS, const Module *M) const {
for (unsigned PReg = 1, PRegE = TRI->getNumRegs(); PReg < PRegE; ++PReg) {
if (MachineOperand::clobbersPhysReg(&(FPRMPair->second[0]), PReg))
- OS << TRI->getName(PReg) << " ";
+ OS << printReg(PReg, TRI) << " ";
}
OS << "\n";
}
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index acbae1bae33..a83f4eff383 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1430,10 +1430,12 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
SmallVector<unsigned, 4> LRegs;
if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
break;
- DEBUG(dbgs() << " Interfering reg " <<
- (LRegs[0] == TRI->getNumRegs() ? "CallResource"
- : TRI->getName(LRegs[0]))
- << " SU #" << CurSU->NodeNum << '\n');
+ DEBUG(dbgs() << " Interfering reg ";
+ if (LRegs[0] == TRI->getNumRegs())
+ dbgs() << "CallResource";
+ else
+ dbgs() << printReg(LRegs[0], TRI);
+ dbgs() << " SU #" << CurSU->NodeNum << '\n');
std::pair<LRegsMapT::iterator, bool> LRegsPair =
LRegsMap.insert(std::make_pair(CurSU, LRegs));
if (LRegsPair.second) {
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index 1fa4c2f4d9d..8d502bdae38 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -193,14 +193,14 @@ void StackMaps::print(raw_ostream &OS) {
case Location::Register:
OS << "Register ";
if (TRI)
- OS << TRI->getName(Loc.Reg);
+ OS << printReg(Loc.Reg, TRI);
else
OS << Loc.Reg;
break;
case Location::Direct:
OS << "Direct ";
if (TRI)
- OS << TRI->getName(Loc.Reg);
+ OS << printReg(Loc.Reg, TRI);
else
OS << Loc.Reg;
if (Loc.Offset)
@@ -209,7 +209,7 @@ void StackMaps::print(raw_ostream &OS) {
case Location::Indirect:
OS << "Indirect ";
if (TRI)
- OS << TRI->getName(Loc.Reg);
+ OS << printReg(Loc.Reg, TRI);
else
OS << Loc.Reg;
OS << "+" << Loc.Offset;
@@ -233,7 +233,7 @@ void StackMaps::print(raw_ostream &OS) {
for (const auto &LO : LiveOuts) {
OS << WSMP << "\t\tLO " << Idx << ": ";
if (TRI)
- OS << TRI->getName(LO.Reg);
+ OS << printReg(LO.Reg, TRI);
else
OS << LO.Reg;
OS << "\t[encoding: .short " << LO.DwarfRegNum << ", .byte 0, .byte "
diff --git a/lib/CodeGen/TargetRegisterInfo.cpp b/lib/CodeGen/TargetRegisterInfo.cpp
index 721761eef61..f4e5583cbe7 100644
--- a/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/lib/CodeGen/TargetRegisterInfo.cpp
@@ -94,11 +94,14 @@ Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI,
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg))
OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
- else if (TRI && Reg < TRI->getNumRegs()) {
+ else if (!TRI)
+ OS << '%' << "physreg" << Reg;
+ else if (Reg < TRI->getNumRegs()) {
OS << '%';
printLowerCase(TRI->getName(Reg), OS);
} else
- OS << "%physreg" << Reg;
+ llvm_unreachable("Register kind is unsupported.");
+
if (SubIdx) {
if (TRI)
OS << ':' << TRI->getSubRegIndexName(SubIdx);
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index db1fbe069f4..2d510a48d1c 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -538,7 +538,7 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n");
return false;
}
- DEBUG(dbgs() << " - Scavenged register: " << TRI->getName(Reg) << "\n");
+ DEBUG(dbgs() << " - Scavenged register: " << printReg(Reg, TRI) << "\n");
std::map<unsigned, unsigned> Substs;
for (MachineInstr &I : *G) {
@@ -611,8 +611,8 @@ void AArch64A57FPLoadBalancing::scanInstruction(
// unit.
unsigned DestReg = MI->getOperand(0).getReg();
- DEBUG(dbgs() << "New chain started for register "
- << TRI->getName(DestReg) << " at " << *MI);
+ DEBUG(dbgs() << "New chain started for register " << printReg(DestReg, TRI)
+ << " at " << *MI);
auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg));
ActiveChains[DestReg] = G.get();
@@ -632,7 +632,7 @@ void AArch64A57FPLoadBalancing::scanInstruction(
if (ActiveChains.find(AccumReg) != ActiveChains.end()) {
DEBUG(dbgs() << "Chain found for accumulator register "
- << TRI->getName(AccumReg) << " in MI " << *MI);
+ << printReg(AccumReg, TRI) << " in MI " << *MI);
// For simplicity we only chain together sequences of MULs/MLAs where the
// accumulator register is killed on each instruction. This means we don't
@@ -657,7 +657,7 @@ void AArch64A57FPLoadBalancing::scanInstruction(
}
DEBUG(dbgs() << "Creating new chain for dest register "
- << TRI->getName(DestReg) << "\n");
+ << printReg(DestReg, TRI) << "\n");
auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg));
ActiveChains[DestReg] = G.get();
AllChains.push_back(std::move(G));
@@ -685,8 +685,8 @@ maybeKillChain(MachineOperand &MO, unsigned Idx,
// If this is a KILL of a current chain, record it.
if (MO.isKill() && ActiveChains.find(MO.getReg()) != ActiveChains.end()) {
- DEBUG(dbgs() << "Kill seen for chain " << TRI->getName(MO.getReg())
- << "\n");
+ DEBUG(dbgs() << "Kill seen for chain " << printReg(MO.getReg(), TRI)
+ << "\n");
ActiveChains[MO.getReg()]->setKill(MI, Idx, /*Immutable=*/MO.isTied());
}
ActiveChains.erase(MO.getReg());
@@ -697,7 +697,7 @@ maybeKillChain(MachineOperand &MO, unsigned Idx,
I != E;) {
if (MO.clobbersPhysReg(I->first)) {
DEBUG(dbgs() << "Kill (regmask) seen for chain "
- << TRI->getName(I->first) << "\n");
+ << printReg(I->first, TRI) << "\n");
I->second->setKill(MI, Idx, /*Immutable=*/true);
ActiveChains.erase(I++);
} else
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 257e6f6e946..72330d9b7cb 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1060,9 +1060,9 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
else
StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
- DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
+ DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
if (RPI.isPaired())
- dbgs() << ", " << TRI->getName(Reg2);
+ dbgs() << ", " << printReg(Reg2, TRI);
dbgs() << ") -> fi#(" << RPI.FrameIdx;
if (RPI.isPaired())
dbgs() << ", " << RPI.FrameIdx+1;
@@ -1123,9 +1123,9 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
else
LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
- DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
+ DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
if (RPI.isPaired())
- dbgs() << ", " << TRI->getName(Reg2);
+ dbgs() << ", " << printReg(Reg2, TRI);
dbgs() << ") -> fi#(" << RPI.FrameIdx;
if (RPI.isPaired())
dbgs() << ", " << RPI.FrameIdx+1;
@@ -1234,7 +1234,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
if (BigStack) {
if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
- << " to get a scratch register.\n");
+ << " to get a scratch register.\n");
SavedRegs.set(UnspilledCSGPR);
// MachO's compact unwind format relies on all registers being stored in
// pairs, so if we need to spill one extra for BigStack, then we need to
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index ef52bae3d76..98cd0f165a6 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -546,8 +546,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
return;
unsigned AndOpReg = RegN->getReg();
- DEBUG(dbgs() << "Examine %" << TargetRegisterInfo::virtReg2Index(AndOpReg)
- << '\n');
+ DEBUG(dbgs() << "Examine " << printReg(AndOpReg) << '\n');
// Examine the PHI insns in the MachineBasicBlock to found out the
// definitions of this virtual register. At this stage (DAG2DAG
diff --git a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
index eb2d2ec4307..be510b5f7e3 100644
--- a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
+++ b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -6,7 +6,7 @@
; CHECK: - { id: {{.*}}, name: in.addr, type: default, offset: 0, size: {{.*}}, alignment: {{.*}},
; CHECK-NEXT: callee-saved-register: '', callee-saved-restored: true,
; CHECK-NEXT: di-variable: '!11', di-expression: '!DIExpression()',
-; CHECK: DBG_VALUE debug-use %0(s32), debug-use _, !11, !DIExpression(), debug-location !12
+; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !11, !DIExpression(), debug-location !12
define void @debug_declare(i32 %in) #0 !dbg !7 {
entry:
%in.addr = alloca i32, align 4
@@ -17,7 +17,7 @@ entry:
}
; CHECK-LABEL: name: debug_declare_vla
-; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !14, !DIExpression(), debug-location !15
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use %noreg, !14, !DIExpression(), debug-location !15
define void @debug_declare_vla(i32 %in) #0 !dbg !13 {
entry:
%vla.addr = alloca i32, i32 %in
@@ -29,16 +29,16 @@ entry:
; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
define void @debug_value(i32 %in) #0 !dbg !16 {
%addr = alloca i32
-; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use %noreg, !17, !DIExpression(), debug-location !18
call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
store i32 %in, i32* %addr
-; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !17, !DIExpression(DW_OP_deref), debug-location !18
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use %noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !17, metadata !DIExpression(DW_OP_deref)), !dbg !18
; CHECK: DBG_VALUE 123, 0, !17, !DIExpression(), debug-location !18
call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
; CHECK: DBG_VALUE float 1.000000e+00, 0, !17, !DIExpression(), debug-location !18
call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
-; CHECK: DBG_VALUE _, 0, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE %noreg, 0, !17, !DIExpression(), debug-location !18
call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
ret void
}
diff --git a/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
index 4282bffdab1..201565c675a 100644
--- a/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
+++ b/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
@@ -36,9 +36,9 @@ body: |
bb.0:
liveins: %w0
%0:_(s32) = COPY %w0
- ; CHECK: DBG_VALUE debug-use %0(s32), debug-use _, !7, !DIExpression(), debug-location !9
- DBG_VALUE debug-use %0(s32), debug-use _, !7, !DIExpression(), debug-location !9
+ ; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
+ DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
- ; CHECK: DBG_VALUE _, 0, !7, !DIExpression(), debug-location !9
- DBG_VALUE _, 0, !7, !DIExpression(), debug-location !9
+ ; CHECK: DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9
+ DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9
...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
index af83be5c075..7396ae57f8f 100644
--- a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
+++ b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
@@ -46,11 +46,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY]]
; CHECK: %w0 = COPY [[ADDWrr]]
- ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use _, !7, !DIExpression(), debug-location !9
+ ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use %noreg, !7, !DIExpression(), debug-location !9
%0:gpr(s32) = COPY %w0
%1:gpr(s32) = G_ADD %0, %0
%w0 = COPY %1(s32)
- DBG_VALUE debug-use %1(s32), debug-use _, !7, !DIExpression(), debug-location !9
+ DBG_VALUE debug-use %1(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
...
---
@@ -62,7 +62,7 @@ body: |
liveins: %w0
; CHECK-LABEL: name: test_dbg_value_dead
; CHECK-NOT: COPY
- ; CHECK: DBG_VALUE debug-use _, debug-use _, !7, !DIExpression(), debug-location !9
+ ; CHECK: DBG_VALUE debug-use %noreg, debug-use %noreg, !7, !DIExpression(), debug-location !9
%0:gpr(s32) = COPY %w0
- DBG_VALUE debug-use %0(s32), debug-use _, !7, !DIExpression(), debug-location !9
+ DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
...
diff --git a/test/CodeGen/AArch64/machine-outliner-remarks.ll b/test/CodeGen/AArch64/machine-outliner-remarks.ll
index 1a237a2403e..a5f131b5a0c 100644
--- a/test/CodeGen/AArch64/machine-outliner-remarks.ll
+++ b/test/CodeGen/AArch64/machine-outliner-remarks.ll
@@ -120,4 +120,4 @@ attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false"
!26 = !DILocation(line: 29, column: 9, scope: !18)
!27 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 35, type: !9, isLocal: false, isDefinition: true, scopeLine: 35, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
!33 = !DILocation(line: 36, column: 1, scope: !27)
-!35 = !DILocation(line: 38, column: 1, scope: !27)
\ No newline at end of file
+!35 = !DILocation(line: 38, column: 1, scope: !27)
diff --git a/test/CodeGen/AMDGPU/fadd.ll b/test/CodeGen/AMDGPU/fadd.ll
index 621a0de281d..a2f1f7195f2 100644
--- a/test/CodeGen/AMDGPU/fadd.ll
+++ b/test/CodeGen/AMDGPU/fadd.ll
@@ -72,4 +72,4 @@ define amdgpu_kernel void @fadd_0_nsz_attr_f32(float addrspace(1)* %out, float %
}
attributes #0 = { nounwind }
-attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
\ No newline at end of file
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir
index 16d9070849b..698f2c3ebc4 100644
--- a/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -548,7 +548,7 @@ body: |
%flat_scr_lo = S_ADD_U32 %sgpr6, %sgpr9, implicit-def %scc
%flat_scr_hi = S_ADDC_U32 %sgpr7, 0, implicit-def %scc, implicit %scc
- DBG_VALUE _, 2, !5, !11, debug-location !12
+ DBG_VALUE %noreg, 2, !5, !11, debug-location !12
%sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
dead %sgpr6_sgpr7 = KILL %sgpr4_sgpr5
%sgpr8 = S_MOV_B32 %sgpr5
diff --git a/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll b/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
index 55c2229fb6b..ebeed0dd443 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
@@ -130,4 +130,4 @@ bb:
}
attributes #0 = { norecurse nounwind "amdgpu-waves-per-eu"="1,1" }
-attributes #1 = { norecurse nounwind }
\ No newline at end of file
+attributes #1 = { norecurse nounwind }
diff --git a/test/CodeGen/AMDGPU/regcoalesce-dbg.mir b/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
index c5a9a0ad01a..69538d8b738 100644
--- a/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
+++ b/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
@@ -63,7 +63,7 @@ body: |
%19.sub1 = COPY killed %18
%10 = S_MOV_B32 61440
%11 = S_MOV_B32 0
- DBG_VALUE debug-use %11, debug-use _, !1, !8, debug-location !9
+ DBG_VALUE debug-use %11, debug-use %noreg, !1, !8, debug-location !9
undef %12.sub0 = COPY killed %11
%12.sub1 = COPY killed %10
undef %13.sub0_sub1 = COPY killed %4
diff --git a/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
index 1ff3bffd387..86d09ce7b09 100644
--- a/test/CodeGen/ARM/ARMLoadStoreDBG.mir
+++ b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
@@ -120,40 +120,40 @@ body: |
bb.0.entry:
liveins: %r0, %r1, %r2, %r3, %lr, %r7
- DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- DBG_VALUE debug-use %r2, debug-use _, !20, !27, debug-location !28
- DBG_VALUE debug-use %r3, debug-use _, !21, !27, debug-location !28
- t2CMPri %r3, 4, 14, _, implicit-def %cpsr, debug-location !31
+ DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28
+ DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28
+ t2CMPri %r3, 4, 14, %noreg, implicit-def %cpsr, debug-location !31
t2Bcc %bb.2.if.end, 2, killed %cpsr
bb.1:
liveins: %lr, %r7
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- %r0 = t2MOVi -1, 14, _, _
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- tBX_RET 14, _, implicit %r0, debug-location !34
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ %r0 = t2MOVi -1, 14, %noreg, %noreg
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ tBX_RET 14, %noreg, implicit %r0, debug-location !34
bb.2.if.end:
liveins: %r0, %r2, %r3, %r7, %lr
- %sp = frame-setup t2STMDB_UPD %sp, 14, _, killed %r7, killed %lr
+ %sp = frame-setup t2STMDB_UPD %sp, 14, %noreg, killed %r7, killed %lr
frame-setup CFI_INSTRUCTION def_cfa_offset 8
frame-setup CFI_INSTRUCTION offset %lr, -4
frame-setup CFI_INSTRUCTION offset %r7, -8
- DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- DBG_VALUE debug-use %r2, debug-use _, !20, !27, debug-location !28
- DBG_VALUE debug-use %r3, debug-use _, !21, !27, debug-location !28
+ DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28
+ DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28
%r1 = COPY killed %r2, debug-location !32
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
%r2 = COPY killed %r3, debug-location !32
- tBL 14, _, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32
- %r0 = t2MOVi 0, 14, _, _
- %sp = t2LDMIA_UPD %sp, 14, _, def %r7, def %lr
- tBX_RET 14, _, implicit %r0, debug-location !34
+ tBL 14, %noreg, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32
+ %r0 = t2MOVi 0, 14, %noreg, %noreg
+ %sp = t2LDMIA_UPD %sp, 14, %noreg, def %r7, def %lr
+ tBX_RET 14, %noreg, implicit %r0, debug-location !34
# Verify that the DBG_VALUE is ignored.
-# CHECK: %sp = t2LDMIA_RET %sp, 14, _, def %r7, def %pc, implicit %r0
+# CHECK: %sp = t2LDMIA_RET %sp, 14, %noreg, def %r7, def %pc, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll b/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
index c1dd9276ddd..ec6ea632591 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
@@ -7,11 +7,11 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) {
; V5T: %[[FPTR:[0-9]+]]:gpr(p0) = COPY %r0
; V4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY %r0
; NOV4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY %r0
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; V5T: BLX %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp
; V4T: BX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp
; NOV4T: BMOVPCRX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
entry:
notail call arm_aapcscc void %fptr()
ret void
@@ -21,9 +21,9 @@ declare arm_aapcscc void @call_target()
define arm_aapcscc void @test_direct_call() {
; CHECK-LABEL: name: test_direct_call
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: BL @call_target, csr_aapcs, implicit-def %lr, implicit %sp
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
entry:
notail call arm_aapcscc void @call_target()
ret void
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
index e2b6f878e6b..c8ed142903b 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
@@ -69,18 +69,18 @@ body: |
; CHECK-LABEL: name: test_icmp_eq_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(eq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_ne_s32
@@ -99,18 +99,18 @@ body: |
; CHECK-LABEL: name: test_icmp_ne_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(ne), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_ugt_s32
@@ -129,18 +129,18 @@ body: |
; CHECK-LABEL: name: test_icmp_ugt_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(ugt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_uge_s32
@@ -159,18 +159,18 @@ body: |
; CHECK-LABEL: name: test_icmp_uge_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 2, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(uge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_ult_s32
@@ -189,18 +189,18 @@ body: |
; CHECK-LABEL: name: test_icmp_ult_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 3, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_ule_s32
@@ -219,18 +219,18 @@ body: |
; CHECK-LABEL: name: test_icmp_ule_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(ule), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_sgt_s32
@@ -249,18 +249,18 @@ body: |
; CHECK-LABEL: name: test_icmp_sgt_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(sgt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_sge_s32
@@ -279,18 +279,18 @@ body: |
; CHECK-LABEL: name: test_icmp_sge_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(sge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_slt_s32
@@ -309,18 +309,18 @@ body: |
; CHECK-LABEL: name: test_icmp_slt_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(slt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_sle_s32
@@ -339,18 +339,18 @@ body: |
; CHECK-LABEL: name: test_icmp_sle_s32
; CHECK: [[COPY:%[0-9]+]]:gpr = COPY %r0
; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY %r1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, _, implicit-def %cpsr
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: CMPrr [[COPY]], [[COPY1]], 14, %noreg, implicit-def %cpsr
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %r0
%1(s32) = COPY %r1
%2(s1) = G_ICMP intpred(sle), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_true_s32
@@ -367,16 +367,16 @@ body: |
liveins: %s0, %s1
; CHECK-LABEL: name: test_fcmp_true_s32
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, _, _
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, _, _
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, %noreg, %noreg
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(true), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_false_s32
@@ -393,16 +393,16 @@ body: |
liveins: %s0, %s1
; CHECK-LABEL: name: test_fcmp_false_s32
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, _, _
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(false), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oeq_s32
@@ -421,19 +421,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_oeq_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(oeq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ogt_s32
@@ -452,19 +452,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ogt_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ogt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oge_s32
@@ -483,19 +483,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_oge_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(oge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_olt_s32
@@ -514,19 +514,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_olt_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(olt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ole_s32
@@ -545,19 +545,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ole_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ole), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ord_s32
@@ -576,19 +576,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ord_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ord), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ugt_s32
@@ -607,19 +607,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ugt_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ugt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uge_s32
@@ -638,19 +638,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_uge_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(uge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ult_s32
@@ -669,19 +669,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ult_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ule_s32
@@ -700,19 +700,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ule_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ule), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_une_s32
@@ -731,19 +731,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_une_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(une), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uno_s32
@@ -762,19 +762,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_uno_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(uno), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_one_s32
@@ -793,22 +793,22 @@ body: |
; CHECK-LABEL: name: test_fcmp_one_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(one), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ueq_s32
@@ -827,22 +827,22 @@ body: |
; CHECK-LABEL: name: test_fcmp_ueq_s32
; CHECK: [[COPY:%[0-9]+]]:spr = COPY %s0
; CHECK: [[COPY1:%[0-9]+]]:spr = COPY %s1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr
- ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: VCMPS [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s32) = COPY %s0
%1(s32) = COPY %s1
%2(s1) = G_FCMP floatpred(ueq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_true_s64
@@ -859,16 +859,16 @@ body: |
liveins: %d0, %d1
; CHECK-LABEL: name: test_fcmp_true_s64
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, _, _
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, _, _
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 1, 14, %noreg, %noreg
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(true), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_false_s64
@@ -885,16 +885,16 @@ body: |
liveins: %d0, %d1
; CHECK-LABEL: name: test_fcmp_false_s64
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, _, _
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(false), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oeq_s64
@@ -913,19 +913,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_oeq_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(oeq), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ogt_s64
@@ -944,19 +944,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ogt_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ogt), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oge_s64
@@ -975,19 +975,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_oge_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 10, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(oge), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_olt_s64
@@ -1006,19 +1006,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_olt_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 4, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(olt), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ole_s64
@@ -1037,19 +1037,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ole_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 9, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ole), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ord_s64
@@ -1068,19 +1068,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ord_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 7, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ord), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ugt_s64
@@ -1099,19 +1099,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ugt_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 8, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ugt), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uge_s64
@@ -1130,19 +1130,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_uge_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 5, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(uge), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ult_s64
@@ -1161,19 +1161,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ult_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 11, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ult), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ule_s64
@@ -1192,19 +1192,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_ule_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 13, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ule), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_une_s64
@@ -1223,19 +1223,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_une_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 1, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(une), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uno_s64
@@ -1254,19 +1254,19 @@ body: |
; CHECK-LABEL: name: test_fcmp_uno_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 6, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(uno), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_one_s64
@@ -1285,22 +1285,22 @@ body: |
; CHECK-LABEL: name: test_fcmp_one_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 12, %cpsr
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 4, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(one), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ueq_s64
@@ -1319,20 +1319,20 @@ body: |
; CHECK-LABEL: name: test_fcmp_ueq_s64
; CHECK: [[COPY:%[0-9]+]]:dpr = COPY %d0
; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY %d1
- ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, _, _
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: [[MOVi:%[0-9]+]]:gpr = MOVi 0, 14, %noreg, %noreg
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi:%[0-9]+]]:gpr = MOVCCi [[MOVi]], 1, 0, %cpsr
- ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, _, implicit-def %fpscr_nzcv
- ; CHECK: FMSTAT 14, _, implicit-def %cpsr, implicit %fpscr_nzcv
+ ; CHECK: VCMPD [[COPY]], [[COPY1]], 14, %noreg, implicit-def %fpscr_nzcv
+ ; CHECK: FMSTAT 14, %noreg, implicit-def %cpsr, implicit %fpscr_nzcv
; CHECK: [[MOVCCi1:%[0-9]+]]:gpr = MOVCCi [[MOVCCi]], 1, 6, %cpsr
- ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, _, _
+ ; CHECK: [[ANDri:%[0-9]+]]:gpr = ANDri [[MOVCCi1]], 1, 14, %noreg, %noreg
; CHECK: %r0 = COPY [[ANDri]]
- ; CHECK: BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
%0(s64) = COPY %d0
%1(s64) = COPY %d1
%2(s1) = G_FCMP floatpred(ueq), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
index d8da96103fb..cee6a121bf8 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
@@ -50,13 +50,13 @@ body: |
%3(s32) = G_MUL %0, %1
%4(s32) = G_ADD %3, %2
- ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_mla_commutative
@@ -84,13 +84,13 @@ body: |
%3(s32) = G_MUL %0, %1
%4(s32) = G_ADD %2, %3
- ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_mla_v5
@@ -118,13 +118,13 @@ body: |
%3(s32) = G_MUL %0, %1
%4(s32) = G_ADD %3, %2
- ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLAv5 [[VREGX]], [[VREGY]], [[VREGZ]], 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gprnopc = MLAv5 [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_mls
@@ -152,13 +152,13 @@ body: |
%3(s32) = G_MUL %0, %1
%4(s32) = G_SUB %2, %3
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = MLS [[VREGX]], [[VREGY]], [[VREGZ]], 14, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = MLS [[VREGX]], [[VREGY]], [[VREGZ]], 14, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_no_mls
@@ -186,14 +186,14 @@ body: |
%3(s32) = G_MUL %0, %1
%4(s32) = G_SUB %2, %3
- ; CHECK: [[VREGM:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, _, _
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = SUBrr [[VREGZ]], [[VREGM]], 14, _, _
+ ; CHECK: [[VREGM:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, %noreg, %noreg
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = SUBrr [[VREGZ]], [[VREGM]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_shifts_to_revsh
@@ -238,8 +238,8 @@ body: |
%r0 = COPY %9(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_shifts_to_revsh_commutative
@@ -284,8 +284,8 @@ body: |
%r0 = COPY %9(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_shifts_no_revsh_features
@@ -329,7 +329,7 @@ body: |
%r0 = COPY %9(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_shifts_no_revsh_constants
@@ -373,7 +373,7 @@ body: |
%r0 = COPY %9(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicrr
@@ -400,13 +400,13 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %1, %2
%4(s32) = G_AND %0, %3
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicrr_commutative
@@ -433,13 +433,13 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %1, %2
%4(s32) = G_AND %3, %0
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicri
@@ -471,13 +471,13 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %1, %2
%4(s32) = G_AND %0, %3
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicri_commutative_xor
@@ -504,13 +504,13 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %2, %1
%4(s32) = G_AND %0, %3
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicri_commutative_and
@@ -537,13 +537,13 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %1, %2
%4(s32) = G_AND %3, %0
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_bicri_commutative_both
@@ -570,11 +570,11 @@ body: |
%2(s32) = G_CONSTANT i32 -1
%3(s32) = G_XOR %2, %1
%4(s32) = G_AND %3, %0
- ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, _, _
+ ; CHECK: [[VREGR:%[0-9]+]]:gpr = BICri [[VREGX]], 192, 14, %noreg, %noreg
%r0 = COPY %4(s32)
; CHECK: %r0 = COPY [[VREGR]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 64773e7ebb1..7c2666e3680 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -81,13 +81,13 @@ body: |
; CHECK: [[VREGTRUNC:%[0-9]+]]:gpr = COPY [[VREG]]
%2(s32) = G_ZEXT %1(s1)
- ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = ANDri [[VREGTRUNC]], 1, 14, _, _
+ ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = ANDri [[VREGTRUNC]], 1, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_and_sext_s1
@@ -111,14 +111,14 @@ body: |
; CHECK: [[VREGTRUNC:%[0-9]+]]:gpr = COPY [[VREG]]
%2(s32) = G_SEXT %1(s1)
- ; CHECK: [[VREGAND:%[0-9]+]]:gpr = ANDri [[VREGTRUNC]], 1, 14, _, _
- ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = RSBri [[VREGAND]], 0, 14, _, _
+ ; CHECK: [[VREGAND:%[0-9]+]]:gpr = ANDri [[VREGTRUNC]], 1, 14, %noreg, %noreg
+ ; CHECK: [[VREGEXT:%[0-9]+]]:gpr = RSBri [[VREGAND]], 0, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_and_sext_s8
@@ -142,13 +142,13 @@ body: |
; CHECK: [[VREGTRUNC:%[0-9]+]]:gprnopc = COPY [[VREG]]
%2(s32) = G_SEXT %1(s8)
- ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = SXTB [[VREGTRUNC]], 0, 14, _
+ ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = SXTB [[VREGTRUNC]], 0, 14, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_and_zext_s16
@@ -172,13 +172,13 @@ body: |
; CHECK: [[VREGTRUNC:%[0-9]+]]:gprnopc = COPY [[VREG]]
%2(s32) = G_ZEXT %1(s16)
- ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = UXTH [[VREGTRUNC]], 0, 14, _
+ ; CHECK: [[VREGEXT:%[0-9]+]]:gprnopc = UXTH [[VREGTRUNC]], 0, 14, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_and_anyext_s8
@@ -207,8 +207,8 @@ body: |
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_and_anyext_s16
@@ -237,8 +237,8 @@ body: |
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGEXT]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_s32
@@ -262,13 +262,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_ADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_fold_imm_s32
@@ -290,13 +290,13 @@ body: |
%1(s32) = G_CONSTANT i32 255
%2(s32) = G_ADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDri [[VREGX]], 255, 14, _, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDri [[VREGX]], 255, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_no_fold_imm_s32
@@ -317,16 +317,16 @@ body: |
; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0
%1(s32) = G_CONSTANT i32 65535
- ; CHECK: [[VREGY:%[0-9]+]]:gpr = MOVi16 65535, 14, _
+ ; CHECK: [[VREGY:%[0-9]+]]:gpr = MOVi16 65535, 14, %noreg
%2(s32) = G_ADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:gpr = ADDrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_fadd_s32
@@ -350,13 +350,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1
%2(s32) = G_FADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VADDS [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VADDS [[VREGX]], [[VREGY]], 14, %noreg
%s0 = COPY %2(s32)
; CHECK: %s0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %s0
- ; CHECK: BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
+ ; CHECK: BX_RET 14, %noreg, implicit %s0
...
---
name: test_fadd_s64
@@ -380,13 +380,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1
%2(s64) = G_FADD %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VADDD [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VADDD [[VREGX]], [[VREGY]], 14, %noreg
%d0 = COPY %2(s64)
; CHECK: %d0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %d0
- ; CHECK: BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
+ ; CHECK: BX_RET 14, %noreg, implicit %d0
...
---
name: test_fsub_s32
@@ -410,13 +410,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1
%2(s32) = G_FSUB %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VSUBS [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VSUBS [[VREGX]], [[VREGY]], 14, %noreg
%s0 = COPY %2(s32)
; CHECK: %s0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %s0
- ; CHECK: BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
+ ; CHECK: BX_RET 14, %noreg, implicit %s0
...
---
name: test_fsub_s64
@@ -440,13 +440,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1
%2(s64) = G_FSUB %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VSUBD [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VSUBD [[VREGX]], [[VREGY]], 14, %noreg
%d0 = COPY %2(s64)
; CHECK: %d0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %d0
- ; CHECK: BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
+ ; CHECK: BX_RET 14, %noreg, implicit %d0
...
---
name: test_fmul_s32
@@ -470,13 +470,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1
%2(s32) = G_FMUL %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VMULS [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VMULS [[VREGX]], [[VREGY]], 14, %noreg
%s0 = COPY %2(s32)
; CHECK: %s0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %s0
- ; CHECK: BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
+ ; CHECK: BX_RET 14, %noreg, implicit %s0
...
---
name: test_fmul_s64
@@ -500,13 +500,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1
%2(s64) = G_FMUL %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VMULD [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VMULD [[VREGX]], [[VREGY]], 14, %noreg
%d0 = COPY %2(s64)
; CHECK: %d0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %d0
- ; CHECK: BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
+ ; CHECK: BX_RET 14, %noreg, implicit %d0
...
---
name: test_fdiv_s32
@@ -530,13 +530,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:spr = COPY %s1
%2(s32) = G_FDIV %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VDIVS [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VDIVS [[VREGX]], [[VREGY]], 14, %noreg
%s0 = COPY %2(s32)
; CHECK: %s0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %s0
- ; CHECK: BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
+ ; CHECK: BX_RET 14, %noreg, implicit %s0
...
---
name: test_fdiv_s64
@@ -560,13 +560,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY %d1
%2(s64) = G_FDIV %0, %1
- ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VDIVD [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VDIVD [[VREGX]], [[VREGY]], 14, %noreg
%d0 = COPY %2(s64)
; CHECK: %d0 = COPY [[VREGSUM]]
- BX_RET 14, _, implicit %d0
- ; CHECK: BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
+ ; CHECK: BX_RET 14, %noreg, implicit %d0
...
---
name: test_sub_s32
@@ -590,13 +590,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_SUB %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBrr [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBrr [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_sub_imm_s32
@@ -618,13 +618,13 @@ body: |
%1(s32) = G_CONSTANT i32 17
%2(s32) = G_SUB %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBri [[VREGX]], 17, 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SUBri [[VREGX]], 17, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_sub_rev_imm_s32
@@ -646,13 +646,13 @@ body: |
%1(s32) = G_CONSTANT i32 17
%2(s32) = G_SUB %1, %0
- ; CHECK: [[VREGRES:%[0-9]+]]:gpr = RSBri [[VREGX]], 17, 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gpr = RSBri [[VREGX]], 17, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_mul_s32
@@ -676,13 +676,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1
%2(s32) = G_MUL %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MUL [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MUL [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_mulv5_s32
@@ -706,13 +706,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gprnopc = COPY %r1
%2(s32) = G_MUL %0, %1
- ; CHECK: early-clobber [[VREGRES:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, _, _
+ ; CHECK: early-clobber [[VREGRES:%[0-9]+]]:gprnopc = MULv5 [[VREGX]], [[VREGY]], 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_sdiv_s32
@@ -736,13 +736,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_SDIV %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SDIV [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gpr = SDIV [[VREGX]], [[VREGY]], 14, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_udiv_s32
@@ -766,13 +766,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_UDIV %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gpr = UDIV [[VREGX]], [[VREGY]], 14, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gpr = UDIV [[VREGX]], [[VREGY]], 14, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_lshr_s32
@@ -796,13 +796,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_LSHR %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 3, 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 3, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_ashr_s32
@@ -826,13 +826,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_ASHR %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 1, 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 1, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_shl_s32
@@ -856,13 +856,13 @@ body: |
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s32) = G_SHL %0, %1
- ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 2, 14, _, _
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = MOVsr [[VREGX]], [[VREGY]], 2, 14, %noreg, %noreg
%r0 = COPY %2(s32)
; CHECK: %r0 = COPY [[VREGRES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_load_from_stack
@@ -888,19 +888,19 @@ body: |
liveins: %r0, %r1, %r2, %r3
%0(p0) = G_FRAME_INDEX %fixed-stack.2
- ; CHECK: [[FI32VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI32]], 0, 14, _, _
+ ; CHECK: [[FI32VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI32]], 0, 14, %noreg, %noreg
%1(s32) = G_LOAD %0(p0) :: (load 4)
- ; CHECK: [[LD32VREG:%[0-9]+]]:gpr = LDRi12 [[FI32VREG]], 0, 14, _
+ ; CHECK: [[LD32VREG:%[0-9]+]]:gpr = LDRi12 [[FI32VREG]], 0, 14, %noreg
%r0 = COPY %1
; CHECK: %r0 = COPY [[LD32VREG]]
%2(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[FI1VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI1]], 0, 14, _, _
+ ; CHECK: [[FI1VREG:%[0-9]+]]:gpr = ADDri %fixed-stack.[[FI1]], 0, 14, %noreg, %noreg
%3(s1) = G_LOAD %2(p0) :: (load 1)
- ; CHECK: [[LD1VREG:%[0-9]+]]:gprnopc = LDRBi12 [[FI1VREG]], 0, 14, _
+ ; CHECK: [[LD1VREG:%[0-9]+]]:gprnopc = LDRBi12 [[FI1VREG]], 0, 14, %noreg
%4(s32) = G_ANYEXT %3(s1)
; CHECK: [[RES:%[0-9]+]]:gpr = COPY [[LD1VREG]]
@@ -908,8 +908,8 @@ body: |
%r0 = COPY %4
; CHECK: %r0 = COPY [[RES]]
- BX_RET 14, _
- ; CHECK: BX_RET 14, _
+ BX_RET 14, %noreg
+ ; CHECK: BX_RET 14, %noreg
...
---
name: test_load_f32
@@ -929,13 +929,13 @@ body: |
; CHECK: %[[P:[0-9]+]]:gpr = COPY %r0
%1(s32) = G_LOAD %0(p0) :: (load 4)
- ; CHECK: %[[V:[0-9]+]]:spr = VLDRS %[[P]], 0, 14, _
+ ; CHECK: %[[V:[0-9]+]]:spr = VLDRS %[[P]], 0, 14, %noreg
%s0 = COPY %1
; CHECK: %s0 = COPY %[[V]]
- BX_RET 14, _, implicit %s0
- ; CHECK: BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
+ ; CHECK: BX_RET 14, %noreg, implicit %s0
...
---
name: test_load_f64
@@ -955,13 +955,13 @@ body: |
; CHECK: %[[P:[0-9]+]]:gpr = COPY %r0
%1(s64) = G_LOAD %0(p0) :: (load 8)
- ; CHECK: %[[V:[0-9]+]]:dpr = VLDRD %[[P]], 0, 14, _
+ ; CHECK: %[[V:[0-9]+]]:dpr = VLDRD %[[P]], 0, 14, %noreg
%d0 = COPY %1
; CHECK: %d0 = COPY %[[V]]
- BX_RET 14, _, implicit %d0
- ; CHECK: BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
+ ; CHECK: BX_RET 14, %noreg, implicit %d0
...
---
name: test_stores
@@ -995,21 +995,21 @@ body: |
%2(s16) = G_TRUNC %3(s32)
G_STORE %1(s8), %0(p0) :: (store 1)
- ; CHECK: STRBi12 %[[I8]], %[[P]], 0, 14, _
+ ; CHECK: STRBi12 %[[I8]], %[[P]], 0, 14, %noreg
G_STORE %2(s16), %0(p0) :: (store 2)
- ; CHECK: STRH %[[I16]], %[[P]], _, 0, 14, _
+ ; CHECK: STRH %[[I16]], %[[P]], %noreg, 0, 14, %noreg
G_STORE %3(s32), %0(p0) :: (store 4)
- ; CHECK: STRi12 %[[I32]], %[[P]], 0, 14, _
+ ; CHECK: STRi12 %[[I32]], %[[P]], 0, 14, %noreg
G_STORE %4(s32), %0(p0) :: (store 4)
- ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14, _
+ ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14, %noreg
G_STORE %5(s64), %0(p0) :: (store 8)
- ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14, _
+ ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14, %noreg
- BX_RET 14, _
+ BX_RET 14, %noreg
...
---
name: test_gep
@@ -1033,10 +1033,10 @@ body: |
; CHECK: %[[OFF:[0-9]+]]:gpr = COPY %r1
%2(p0) = G_GEP %0, %1(s32)
- ; CHECK: %[[GEP:[0-9]+]]:gpr = ADDrr %[[PTR]], %[[OFF]], 14, _, _
+ ; CHECK: %[[GEP:[0-9]+]]:gpr = ADDrr %[[PTR]], %[[OFF]], 14, %noreg, %noreg
%r0 = COPY %2(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_constant_imm
@@ -1050,10 +1050,10 @@ registers:
body: |
bb.0:
%0(s32) = G_CONSTANT 42
- ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, _, _
+ ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, %noreg, %noreg
%r0 = COPY %0(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_constant_cimm
@@ -1069,10 +1069,10 @@ body: |
; Adding a type on G_CONSTANT changes its operand from an Imm into a CImm.
; We still want to see the same thing in the output though.
%0(s32) = G_CONSTANT i32 42
- ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, _, _
+ ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, %noreg, %noreg
%r0 = COPY %0(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_select_s32
@@ -1100,14 +1100,14 @@ body: |
; CHECK: [[VREGC:%[0-9]+]]:gpr = COPY [[VREGY]]
%3(s32) = G_SELECT %2(s1), %0, %1
- ; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: CMPri [[VREGC]], 0, 14, %noreg, implicit-def %cpsr
; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[RES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_select_ptr
@@ -1139,14 +1139,14 @@ body: |
; CHECK: [[VREGD:%[0-9]+]]:gpr = COPY [[VREGC]]
%4(p0) = G_SELECT %3(s1), %0, %1
- ; CHECK: CMPri [[VREGD]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: CMPri [[VREGD]], 0, 14, %noreg, implicit-def %cpsr
; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
%r0 = COPY %4(p0)
; CHECK: %r0 = COPY [[RES]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_br
@@ -1170,7 +1170,7 @@ body: |
; CHECK: [[COND:%[0-9]+]]:gpr = COPY [[COND32]]
G_BRCOND %1(s1), %bb.1
- ; CHECK: TSTri [[COND]], 1, 14, _, implicit-def %cpsr
+ ; CHECK: TSTri [[COND]], 1, 14, %noreg, implicit-def %cpsr
; CHECK: Bcc %bb.1, 1, %cpsr
G_BR %bb.2
; CHECK: B %bb.2
@@ -1185,8 +1185,8 @@ body: |
bb.2:
; CHECK: bb.2
- BX_RET 14, _
- ; CHECK: BX_RET 14, _
+ BX_RET 14, %noreg
+ ; CHECK: BX_RET 14, %noreg
...
---
name: test_soft_fp_double
@@ -1223,6 +1223,6 @@ body: |
%r1 = COPY %4
; CHECK: %r1 = COPY [[OUT2]]
- BX_RET 14, _, implicit %r0, implicit %r1
- ; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
+ ; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index 0994455916e..194c6270437 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -3,7 +3,7 @@
define void @test_void_return() {
; CHECK-LABEL: name: test_void_return
-; CHECK: BX_RET 14, _
+; CHECK: BX_RET 14, %noreg
entry:
ret void
}
@@ -18,7 +18,7 @@ define signext i1 @test_add_i1(i1 %x, i1 %y) {
; CHECK: [[SUM:%[0-9]+]]:_(s1) = G_ADD [[VREGX]], [[VREGY]]
; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_SEXT [[SUM]]
; CHECK: %r0 = COPY [[EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i1 %x, %y
ret i1 %sum
@@ -34,7 +34,7 @@ define i8 @test_add_i8(i8 %x, i8 %y) {
; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGX]], [[VREGY]]
; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i8 %x, %y
ret i8 %sum
@@ -50,7 +50,7 @@ define i8 @test_sub_i8(i8 %x, i8 %y) {
; CHECK: [[RES:%[0-9]+]]:_(s8) = G_SUB [[VREGX]], [[VREGY]]
; CHECK: [[RES_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
; CHECK: %r0 = COPY [[RES_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%res = sub i8 %x, %y
ret i8 %res
@@ -63,7 +63,7 @@ define signext i8 @test_return_sext_i8(i8 %x) {
; CHECK: [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREGR0]]
; CHECK: [[VREGEXT:%[0-9]+]]:_(s32) = G_SEXT [[VREG]]
; CHECK: %r0 = COPY [[VREGEXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
ret i8 %x
}
@@ -78,7 +78,7 @@ define i16 @test_add_i16(i16 %x, i16 %y) {
; CHECK: [[SUM:%[0-9]+]]:_(s16) = G_ADD [[VREGX]], [[VREGY]]
; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i16 %x, %y
ret i16 %sum
@@ -94,7 +94,7 @@ define i16 @test_sub_i16(i16 %x, i16 %y) {
; CHECK: [[RES:%[0-9]+]]:_(s16) = G_SUB [[VREGX]], [[VREGY]]
; CHECK: [[RES_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
; CHECK: %r0 = COPY [[RES_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%res = sub i16 %x, %y
ret i16 %res
@@ -107,7 +107,7 @@ define zeroext i16 @test_return_zext_i16(i16 %x) {
; CHECK: [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREGR0]]
; CHECK: [[VREGEXT:%[0-9]+]]:_(s32) = G_ZEXT [[VREG]]
; CHECK: %r0 = COPY [[VREGEXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
ret i16 %x
}
@@ -119,7 +119,7 @@ define i32 @test_add_i32(i32 %x, i32 %y) {
; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[SUM:%[0-9]+]]:_(s32) = G_ADD [[VREGX]], [[VREGY]]
; CHECK: %r0 = COPY [[SUM]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i32 %x, %y
ret i32 %sum
@@ -132,7 +132,7 @@ define i32 @test_sub_i32(i32 %x, i32 %y) {
; CHECK-DAG: [[VREGY:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SUB [[VREGX]], [[VREGY]]
; CHECK: %r0 = COPY [[RES]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%res = sub i32 %x, %y
ret i32 %res
@@ -149,7 +149,7 @@ define i32 @test_stack_args(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5
; CHECK: [[VREGP5:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]]{{.*}}load 4
; CHECK: [[SUM:%[0-9]+]]:_(s32) = G_ADD [[VREGP2]], [[VREGP5]]
; CHECK: %r0 = COPY [[SUM]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i32 %p2, %p5
ret i32 %sum
@@ -170,7 +170,7 @@ define i16 @test_stack_args_signext(i32 %p0, i16 %p1, i8 %p2, i1 %p3,
; CHECK: [[SUM:%[0-9]+]]:_(s16) = G_ADD [[VREGP1]], [[VREGP5]]
; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i16 %p1, %p5
ret i16 %sum
@@ -191,7 +191,7 @@ define i8 @test_stack_args_zeroext(i32 %p0, i16 %p1, i8 %p2, i1 %p3,
; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGP2]], [[VREGP4]]
; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i8 %p2, %p4
ret i8 %sum
@@ -211,7 +211,7 @@ define i8 @test_stack_args_noext(i32 %p0, i16 %p1, i8 %p2, i1 %p3,
; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[VREGP2]], [[VREGP4]]
; CHECK: [[SUM_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUM]]
; CHECK: %r0 = COPY [[SUM_EXT]](s32)
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%sum = add i8 %p2, %p4
ret i8 %sum
@@ -229,7 +229,7 @@ define zeroext i16 @test_stack_args_extend_the_extended(i32 %p0, i16 %p1, i8 %p2
; CHECK: [[VREGP5:%[0-9]+]]:_(s16) = G_TRUNC [[VREGP5SEXT]]
; CHECK: [[VREGP5ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[VREGP5]]
; CHECK: %r0 = COPY [[VREGP5ZEXT]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
ret i16 %p5
}
@@ -251,7 +251,7 @@ define i32* @test_ptr_ret(i32** %p) {
; CHECK: [[VREGP:%[0-9]+]]:_(p0) = COPY %r0
; CHECK: [[VREGV:%[0-9]+]]:_(p0) = G_LOAD [[VREGP]](p0){{.*}}load 4
; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%v = load i32*, i32** %p
ret i32* %v
@@ -266,7 +266,7 @@ define i32 @test_ptr_arg_on_stack(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32* %p) {
; CHECK: [[VREGP:%[0-9]+]]:_(p0) = G_LOAD [[FIP]](p0){{.*}}load 4
; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_LOAD [[VREGP]](p0){{.*}}load 4
; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%v = load i32, i32* %p
ret i32 %v
@@ -284,7 +284,7 @@ define arm_aapcscc float @test_float_aapcscc(float %p0, float %p1, float %p2,
; CHECK: [[VREGP5:%[0-9]+]]:_(s32) = G_LOAD [[FIP5]](p0){{.*}}load 4
; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_FADD [[VREGP1]], [[VREGP5]]
; CHECK: %r0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%v = fadd float %p1, %p5
ret float %v
@@ -313,7 +313,7 @@ define arm_aapcs_vfpcc float @test_float_vfpcc(float %p0, float %p1, float %p2,
; CHECK: [[VREGQ1:%[0-9]+]]:_(s32) = G_LOAD [[FIQ1]](p0){{.*}}load 4
; CHECK: [[VREGV:%[0-9]+]]:_(s32) = G_FADD [[VREGP1]], [[VREGQ1]]
; CHECK: %s0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %s0
+; CHECK: BX_RET 14, %noreg, implicit %s0
entry:
%v = fadd float %p1, %q1
ret float %v
@@ -334,7 +334,7 @@ define arm_aapcs_vfpcc double @test_double_vfpcc(double %p0, double %p1, double
; CHECK: [[VREGQ1:%[0-9]+]]:_(s64) = G_LOAD [[FIQ1]](p0){{.*}}load 8
; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP1]], [[VREGQ1]]
; CHECK: %d0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %d0
+; CHECK: BX_RET 14, %noreg, implicit %d0
entry:
%v = fadd double %p1, %q1
ret double %v
@@ -360,7 +360,7 @@ define arm_aapcscc double @test_double_aapcscc(double %p0, double %p1, double %p
; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
; CHECK-DAG: %r0 = COPY [[VREGVLO]]
; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%v = fadd double %p1, %p5
ret double %v
@@ -382,7 +382,7 @@ define arm_aapcs_vfpcc double @test_double_gap_vfpcc(double %p0, float %filler,
; CHECK: [[VREGQ1:%[0-9]+]]:_(s64) = G_LOAD [[FIQ1]](p0){{.*}}load 8
; CHECK: [[VREGV:%[0-9]+]]:_(s64) = G_FADD [[VREGP1]], [[VREGQ1]]
; CHECK: %d0 = COPY [[VREGV]]
-; CHECK: BX_RET 14, _, implicit %d0
+; CHECK: BX_RET 14, %noreg, implicit %d0
entry:
%v = fadd double %p1, %q1
ret double %v
@@ -405,7 +405,7 @@ define arm_aapcscc double @test_double_gap_aapcscc(float %filler, double %p0,
; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
; CHECK-DAG: %r0 = COPY [[VREGVLO]]
; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%v = fadd double %p0, %p1
ret double %v
@@ -428,7 +428,7 @@ define arm_aapcscc double @test_double_gap2_aapcscc(double %p0, float %filler,
; BIG: [[VREGVHI:%[0-9]+]]:_(s32), [[VREGVLO:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VREGV]](s64)
; CHECK-DAG: %r0 = COPY [[VREGVLO]]
; CHECK-DAG: %r1 = COPY [[VREGVHI]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%v = fadd double %p0, %p1
ret double %v
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
index 6596036ab69..35afaaadd94 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
@@ -55,7 +55,7 @@ body: |
%2(s32) = G_SDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_udiv_i32
@@ -91,7 +91,7 @@ body: |
%2(s32) = G_UDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_sdiv_i16
@@ -145,7 +145,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_udiv_i16
@@ -197,7 +197,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_ZEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_sdiv_i8
@@ -251,7 +251,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_udiv_i8
@@ -303,7 +303,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_ZEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_srem_i32
@@ -341,7 +341,7 @@ body: |
%2(s32) = G_SREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_urem_i32
@@ -379,7 +379,7 @@ body: |
%2(s32) = G_UREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_srem_i16
@@ -435,7 +435,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_urem_i16
@@ -489,7 +489,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_ZEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_srem_i8
@@ -545,7 +545,7 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_urem_i8
@@ -599,5 +599,5 @@ body: |
; CHECK: %r0 = COPY [[R]]
%5(s32) = G_ZEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
index cd02da286d2..4f48581fdf3 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
@@ -93,7 +93,7 @@ body: |
%2(s32) = G_FREM %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_frem_double
@@ -151,7 +151,7 @@ body: |
%7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fpow_float
@@ -188,7 +188,7 @@ body: |
%2(s32) = G_FPOW %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fpow_double
@@ -246,7 +246,7 @@ body: |
%7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fadd_float
@@ -281,7 +281,7 @@ body: |
%2(s32) = G_FADD %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fadd_double
@@ -333,7 +333,7 @@ body: |
%7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fsub_float
@@ -368,7 +368,7 @@ body: |
%2(s32) = G_FSUB %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fsub_double
@@ -420,7 +420,7 @@ body: |
%7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fmul_float
@@ -455,7 +455,7 @@ body: |
%2(s32) = G_FMUL %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fmul_double
@@ -507,7 +507,7 @@ body: |
%7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fdiv_float
@@ -542,7 +542,7 @@ body: |
%2(s32) = G_FDIV %0, %1
; CHECK: %r0 = COPY [[R]]
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fdiv_double
@@ -594,7 +594,7 @@ body: |
%7(s32),%8(s32) = G_UNMERGE_VALUES %6(s64)
%r0 = COPY %7(s32)
%r1 = COPY %8(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
---
name: test_fcmp_true_s32
@@ -618,7 +618,7 @@ body: |
%2(s1) = G_FCMP floatpred(true), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(true), [[X]](s32), [[Y]]
@@ -655,7 +655,7 @@ body: |
%2(s1) = G_FCMP floatpred(false), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
; HARD-DAG: [[X:%[0-9]+]]:_(s32) = COPY %r0
; HARD-DAG: [[Y:%[0-9]+]]:_(s32) = COPY %r1
; HARD: [[R:%[0-9]+]]:_(s1) = G_FCMP floatpred(false), [[X]](s32), [[Y]]
@@ -714,7 +714,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ogt_s32
@@ -760,7 +760,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oge_s32
@@ -806,7 +806,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_olt_s32
@@ -852,7 +852,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ole_s32
@@ -898,7 +898,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ord_s32
@@ -938,7 +938,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ugt_s32
@@ -979,7 +979,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uge_s32
@@ -1020,7 +1020,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ult_s32
@@ -1061,7 +1061,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ule_s32
@@ -1102,7 +1102,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_une_s32
@@ -1143,7 +1143,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uno_s32
@@ -1189,7 +1189,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_one_s32
@@ -1249,7 +1249,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ueq_s32
@@ -1309,7 +1309,7 @@ body: |
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_true_s64
@@ -1358,7 +1358,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_false_s64
@@ -1408,7 +1408,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oeq_s64
@@ -1468,7 +1468,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ogt_s64
@@ -1528,7 +1528,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_oge_s64
@@ -1588,7 +1588,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_olt_s64
@@ -1648,7 +1648,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ole_s64
@@ -1708,7 +1708,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ord_s64
@@ -1762,7 +1762,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ugt_s64
@@ -1817,7 +1817,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uge_s64
@@ -1872,7 +1872,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ult_s64
@@ -1927,7 +1927,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ule_s64
@@ -1982,7 +1982,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_une_s64
@@ -2037,7 +2037,7 @@ body: |
; CHECK: [[REXT:%[0-9]+]]:_(s32) = G_ZEXT [[R]](s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_uno_s64
@@ -2097,7 +2097,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_one_s64
@@ -2173,7 +2173,7 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_fcmp_ueq_s64
@@ -2249,5 +2249,5 @@ body: |
%7(s32) = G_ZEXT %6(s1)
%r0 = COPY %7(s32)
; CHECK: %r0 = COPY [[REXT]]
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 816c042a6d5..e3e206cf76e 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -74,7 +74,7 @@ body: |
; G_SEXT with s8 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_SEXT {{%[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_zext_s16
@@ -98,7 +98,7 @@ body: |
; G_ZEXT with s16 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_ZEXT {{%[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_s8
@@ -130,7 +130,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_s16
@@ -162,7 +162,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_add_s32
@@ -186,7 +186,7 @@ body: |
; G_ADD with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -219,7 +219,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_sub_s16
@@ -251,7 +251,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_sub_s32
@@ -275,7 +275,7 @@ body: |
; G_SUB with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -308,7 +308,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_mul_s16
@@ -340,7 +340,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_mul_s32
@@ -364,7 +364,7 @@ body: |
; G_MUL with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -397,7 +397,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_and_s16
@@ -429,7 +429,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_and_s32
@@ -453,7 +453,7 @@ body: |
; G_AND with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -486,7 +486,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_or_s16
@@ -518,7 +518,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_or_s32
@@ -542,7 +542,7 @@ body: |
; G_OR with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -575,7 +575,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s8)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_xor_s16
@@ -607,7 +607,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
%5(s32) = G_SEXT %4(s16)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_xor_s32
@@ -631,7 +631,7 @@ body: |
; G_XOR with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -656,7 +656,7 @@ body: |
; G_LSHR with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_LSHR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -681,7 +681,7 @@ body: |
; G_ASHR with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_ASHR {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -706,7 +706,7 @@ body: |
; G_SHL with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_SHL {{%[0-9]+, %[0-9]+}}
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -737,7 +737,7 @@ body: |
%0(p0) = G_FRAME_INDEX %fixed-stack.2
%1(s32) = G_LOAD %0(p0) :: (load 4)
%r0 = COPY %1(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_legal_loads_stores
@@ -785,7 +785,7 @@ body: |
G_STORE %5(s1), %0(p0) :: (store 1)
%6(p0) = G_LOAD %0(p0) :: (load 4)
G_STORE %6(p0), %0(p0) :: (store 4)
- BX_RET 14, _
+ BX_RET 14, %noreg
...
---
name: test_gep
@@ -810,7 +810,7 @@ body: |
%2(p0) = G_GEP %0, %1(s32)
%r0 = COPY %2(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_constants
@@ -857,7 +857,7 @@ body: |
; CHECK-NOT: G_CONSTANT i1
%r0 = COPY %0(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_s8
@@ -888,7 +888,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s1) = G_ICMP intpred(ne), {{%[0-9]+}}(s8), {{%[0-9]+}}
%5(s32) = G_ZEXT %4(s1)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_s16
@@ -919,7 +919,7 @@ body: |
; CHECK-NOT: {{%[0-9]+}}:_(s1) = G_ICMP intpred(slt), {{%[0-9]+}}(s16), {{%[0-9]+}}
%5(s32) = G_ZEXT %4(s1)
%r0 = COPY %5(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_icmp_s32
@@ -945,7 +945,7 @@ body: |
; CHECK: {{%[0-9]+}}:_(s1) = G_ICMP intpred(eq), {{%[0-9]+}}(s32), {{%[0-9]+}}
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_select_s32
@@ -971,7 +971,7 @@ body: |
; G_SELECT with s32 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(s32) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_select_ptr
@@ -997,7 +997,7 @@ body: |
; G_SELECT with p0 is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(p0) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
%r0 = COPY %3(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_brcond
@@ -1026,11 +1026,11 @@ body: |
bb.1:
%r0 = COPY %1(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
bb.2:
%r0 = COPY %0(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -1053,6 +1053,6 @@ body: |
; G_GLOBAL_VALUE is legal, so we should find it unchanged in the output
; CHECK: {{%[0-9]+}}:_(p0) = G_GLOBAL_VALUE @a_global
%r0 = COPY %1(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll b/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
index 92c4e2905d8..fd484287398 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
@@ -7,14 +7,14 @@ define arm_aapcscc i32* @test_call_simple_reg_params(i32 *%a, i32 %b) {
; CHECK-LABEL: name: test_call_simple_reg_params
; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY %r0
; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: %r1 = COPY [[AVREG]]
; CHECK: BL @simple_reg_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0
; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: %r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%r = notail call arm_aapcscc i32 *@simple_reg_params_target(i32 %b, i32 *%a)
ret i32 *%r
@@ -26,7 +26,7 @@ define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
; CHECK-LABEL: name: test_call_simple_stack_params
; CHECK-DAG: [[AVREG:%[0-9]+]]:_(p0) = COPY %r0
; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r1
-; CHECK: ADJCALLSTACKDOWN 8, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: %r1 = COPY [[AVREG]]
; CHECK-DAG: %r2 = COPY [[BVREG]]
@@ -41,9 +41,9 @@ define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
; CHECK: BL @simple_stack_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY %r0
-; CHECK: ADJCALLSTACKUP 8, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: %r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%r = notail call arm_aapcscc i32 *@simple_stack_params_target(i32 %b, i32 *%a, i32 %b, i32 *%a, i32 %b, i32 *%a)
ret i32 *%r
@@ -59,7 +59,7 @@ define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s16) = G_TRUNC [[R1VREG]]
; CHECK-DAG: [[R2VREG:%[0-9]+]]:_(s32) = COPY %r2
; CHECK-DAG: [[CVREG:%[0-9]+]]:_(s1) = G_TRUNC [[R2VREG]]
-; CHECK: ADJCALLSTACKDOWN 20, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 20, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[SEXTA:%[0-9]+]]:_(s32) = G_SEXT [[AVREG]](s8)
; CHECK: %r0 = COPY [[SEXTA]]
; CHECK: [[ZEXTA:%[0-9]+]]:_(s32) = G_ZEXT [[AVREG]](s8)
@@ -96,10 +96,10 @@ define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
; CHECK: BL @ext_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
; CHECK: [[R0VREG:%[0-9]+]]:_(s32) = COPY %r0
; CHECK: [[RVREG:%[0-9]+]]:_(s16) = G_TRUNC [[R0VREG]]
-; CHECK: ADJCALLSTACKUP 20, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 20, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[RExtVREG:%[0-9]+]]:_(s32) = G_SEXT [[RVREG]]
; CHECK: %r0 = COPY [[RExtVREG]]
-; CHECK: BX_RET 14, _, implicit %r0
+; CHECK: BX_RET 14, %noreg, implicit %r0
entry:
%r = notail call arm_aapcscc signext i16 @ext_target(i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i1 zeroext %c)
ret i16 %r
@@ -111,14 +111,14 @@ define arm_aapcs_vfpcc double @test_call_vfpcc_fp_params(double %a, float %b) {
; CHECK-LABEL: name: test_call_vfpcc_fp_params
; CHECK-DAG: [[AVREG:%[0-9]+]]:_(s64) = COPY %d0
; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %s2
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK-DAG: %s0 = COPY [[BVREG]]
; CHECK-DAG: %d1 = COPY [[AVREG]]
; CHECK: BL @vfpcc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %d1, implicit-def %d0
; CHECK: [[RVREG:%[0-9]+]]:_(s64) = COPY %d0
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: %d0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, _, implicit %d0
+; CHECK: BX_RET 14, %noreg, implicit %d0
entry:
%r = notail call arm_aapcs_vfpcc double @vfpcc_fp_target(float %b, double %a)
ret double %r
@@ -133,7 +133,7 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; LITTLE-DAG: [[AVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[A1]](s32), [[A2]](s32)
; BIG-DAG: [[AVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[A2]](s32), [[A1]](s32)
; CHECK-DAG: [[BVREG:%[0-9]+]]:_(s32) = COPY %r2
-; CHECK: ADJCALLSTACKDOWN 16, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 16, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK-DAG: %r0 = COPY [[BVREG]]
; CHECK-DAG: [[A1:%[0-9]+]]:_(s32), [[A2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AVREG]](s64)
; LITTLE-DAG: %r2 = COPY [[A1]]
@@ -153,13 +153,13 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY %r1
; LITTLE: [[RVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R1]](s32), [[R2]](s32)
; BIG: [[RVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R2]](s32), [[R1]](s32)
-; CHECK: ADJCALLSTACKUP 16, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 16, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RVREG]](s64)
; LITTLE-DAG: %r0 = COPY [[R1]]
; LITTLE-DAG: %r1 = COPY [[R2]]
; BIG-DAG: %r0 = COPY [[R2]]
; BIG-DAG: %r1 = COPY [[R1]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%r = notail call arm_aapcscc double @aapcscc_fp_target(float %b, double %a, float %b, double %a)
ret double %r
@@ -170,13 +170,13 @@ declare arm_aapcscc float @different_call_conv_target(float)
define arm_aapcs_vfpcc float @test_call_different_call_conv(float %x) {
; CHECK-LABEL: name: test_call_different_call_conv
; CHECK: [[X:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: %r0 = COPY [[X]]
; CHECK: BL @different_call_conv_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit-def %r0
; CHECK: [[R:%[0-9]+]]:_(s32) = COPY %r0
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: %s0 = COPY [[R]]
-; CHECK: BX_RET 14, _, implicit %s0
+; CHECK: BX_RET 14, %noreg, implicit %s0
entry:
%r = notail call arm_aapcscc float @different_call_conv_target(float %x)
ret float %r
@@ -190,7 +190,7 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[ARG_ARR:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s64)
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
@@ -199,7 +199,7 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %r2
; CHECK: [[RES_ARR:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32)
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RES_ARR]](s96)
; FIXME: This doesn't seem correct with regard to the AAPCS docs (which say
; that composite types larger than 4 bytes should be passed through memory),
@@ -207,7 +207,7 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
; CHECK: %r2 = COPY [[R2]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1, implicit %r2
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1, implicit %r2
entry:
%r = notail call arm_aapcscc [3 x i32] @tiny_int_arrays_target([2 x i32] %arr)
ret [3 x i32] %r
@@ -224,7 +224,7 @@ define arm_aapcscc void @test_multiple_int_arrays([2 x i32] %arr0, [2 x i32] %ar
; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY %r3
; CHECK: [[ARG_ARR0:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
; CHECK: [[ARG_ARR1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R2]](s32), [[R3]](s32)
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR0]](s64)
; CHECK: [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR1]](s64)
; CHECK: %r0 = COPY [[R0]]
@@ -232,8 +232,8 @@ define arm_aapcscc void @test_multiple_int_arrays([2 x i32] %arr0, [2 x i32] %ar
; CHECK: %r2 = COPY [[R2]]
; CHECK: %r3 = COPY [[R3]]
; CHECK: BL @multiple_int_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
-; CHECK: BX_RET 14, _
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
+; CHECK: BX_RET 14, %noreg
entry:
notail call arm_aapcscc void @multiple_int_arrays_target([2 x i32] %arr0, [2 x i32] %arr1)
ret void
@@ -258,7 +258,7 @@ define arm_aapcscc void @test_large_int_arrays([20 x i32] %arr) {
; CHECK: [[LAST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LAST_STACK_ID]]
; CHECK: [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[LAST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[LAST_STACK_ID]]
; CHECK: [[ARG_ARR:%[0-9]+]]:_(s640) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32), [[FIRST_STACK_ELEMENT]](s32), {{.*}}, [[LAST_STACK_ELEMENT]](s32)
-; CHECK: ADJCALLSTACKDOWN 64, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 64, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32), [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32), {{.*}}, [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s640)
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
@@ -275,8 +275,8 @@ define arm_aapcscc void @test_large_int_arrays([20 x i32] %arr) {
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4
; CHECK: BL @large_int_arrays_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3
-; CHECK: ADJCALLSTACKUP 64, 0, 14, _, implicit-def %sp, implicit %sp
-; CHECK: BX_RET 14, _
+; CHECK: ADJCALLSTACKUP 64, 0, 14, %noreg, implicit-def %sp, implicit %sp
+; CHECK: BX_RET 14, %noreg
entry:
notail call arm_aapcscc void @large_int_arrays_target([20 x i32] %arr)
ret void
@@ -300,7 +300,7 @@ define arm_aapcscc [2 x float] @test_fp_arrays_aapcs([3 x double] %arr) {
; CHECK: [[ARR2_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[ARR2_ID]]
; CHECK: [[ARR2:%[0-9]+]]:_(s64) = G_LOAD [[ARR2_FI]]{{.*}}load 8 from %fixed-stack.[[ARR2_ID]]
; CHECK: [[ARR_MERGED:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[ARR0]](s64), [[ARR1]](s64), [[ARR2]](s64)
-; CHECK: ADJCALLSTACKDOWN 8, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[ARR0:%[0-9]+]]:_(s64), [[ARR1:%[0-9]+]]:_(s64), [[ARR2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[ARR_MERGED]](s192)
; CHECK: [[ARR0_0:%[0-9]+]]:_(s32), [[ARR0_1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARR0]](s64)
; LITTLE: %r0 = COPY [[ARR0_0]](s32)
@@ -320,11 +320,11 @@ define arm_aapcscc [2 x float] @test_fp_arrays_aapcs([3 x double] %arr) {
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[R_MERGED:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
-; CHECK: ADJCALLSTACKUP 8, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 8, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R_MERGED]](s64)
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%r = notail call arm_aapcscc [2 x float] @fp_arrays_aapcs_target([3 x double] %arr)
ret [2 x float] %r
@@ -357,7 +357,7 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: [[X_ARR:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[X0]](s64), [[X1]](s64), [[X2]](s64)
; CHECK: [[Y_ARR:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[Y0]](s32), [[Y1]](s32), [[Y2]](s32)
; CHECK: [[Z_ARR:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[Z0]](s64), [[Z1]](s64), [[Z2]](s64), [[Z3]](s64)
-; CHECK: ADJCALLSTACKDOWN 32, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 32, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[X0:%[0-9]+]]:_(s64), [[X1:%[0-9]+]]:_(s64), [[X2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[X_ARR]](s192)
; CHECK: [[Y0:%[0-9]+]]:_(s32), [[Y1:%[0-9]+]]:_(s32), [[Y2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[Y_ARR]](s96)
; CHECK: [[Z0:%[0-9]+]]:_(s64), [[Z1:%[0-9]+]]:_(s64), [[Z2:%[0-9]+]]:_(s64), [[Z3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[Z_ARR]](s256)
@@ -389,13 +389,13 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY %s2
; CHECK: [[R3:%[0-9]+]]:_(s32) = COPY %s3
; CHECK: [[R_MERGED:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32)
-; CHECK: ADJCALLSTACKUP 32, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 32, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R_MERGED]](s128)
; CHECK: %s0 = COPY [[R0]]
; CHECK: %s1 = COPY [[R1]]
; CHECK: %s2 = COPY [[R2]]
; CHECK: %s3 = COPY [[R3]]
-; CHECK: BX_RET 14, _, implicit %s0, implicit %s1, implicit %s2, implicit %s3
+; CHECK: BX_RET 14, %noreg, implicit %s0, implicit %s1, implicit %s2, implicit %s3
entry:
%r = notail call arm_aapcs_vfpcc [4 x float] @fp_arrays_aapcs_vfp_target([3 x double] %x, [3 x float] %y, [4 x double] %z)
ret [4 x float] %r
@@ -420,7 +420,7 @@ define arm_aapcscc [2 x i32*] @test_tough_arrays([6 x [4 x i32]] %arr) {
; CHECK: [[LAST_STACK_ELEMENT_FI:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LAST_STACK_ID]]
; CHECK: [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_LOAD [[LAST_STACK_ELEMENT_FI]]{{.*}}load 4 from %fixed-stack.[[LAST_STACK_ID]]
; CHECK: [[ARG_ARR:%[0-9]+]]:_(s768) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32), [[R2]](s32), [[R3]](s32), [[FIRST_STACK_ELEMENT]](s32), {{.*}}, [[LAST_STACK_ELEMENT]](s32)
-; CHECK: ADJCALLSTACKDOWN 80, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 80, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32), [[FIRST_STACK_ELEMENT:%[0-9]+]]:_(s32), {{.*}}, [[LAST_STACK_ELEMENT:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ARG_ARR]](s768)
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
@@ -440,11 +440,11 @@ define arm_aapcscc [2 x i32*] @test_tough_arrays([6 x [4 x i32]] %arr) {
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[RES_ARR:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
-; CHECK: ADJCALLSTACKUP 80, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 80, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[RES_ARR]](s64)
; CHECK: %r0 = COPY [[R0]]
; CHECK: %r1 = COPY [[R1]]
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
entry:
%r = notail call arm_aapcscc [2 x i32*] @tough_arrays_target([6 x [4 x i32]] %arr)
ret [2 x i32*] %r
@@ -458,7 +458,7 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
; CHECK-DAG: [[X0:%[0-9]+]]:_(s32) = COPY %r0
; CHECK-DAG: [[X1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[X:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[X0]](s32), [[X1]](s32)
-; CHECK: ADJCALLSTACKDOWN 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[X0:%[0-9]+]]:_(s32), [[X1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[X]](s64)
; CHECK-DAG: %r0 = COPY [[X0]](s32)
; CHECK-DAG: %r1 = COPY [[X1]](s32)
@@ -466,11 +466,11 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY %r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY %r1
; CHECK: [[R:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
-; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, %noreg, implicit-def %sp, implicit %sp
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[R]](s64)
; CHECK: %r0 = COPY [[R0]](s32)
; CHECK: %r1 = COPY [[R1]](s32)
-; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+; CHECK: BX_RET 14, %noreg, implicit %r0, implicit %r1
%r = notail call arm_aapcscc {i32, i32} @structs_target({i32, i32} %x)
ret {i32, i32} %r
}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index 986f4a5ae48..044740e33a2 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -80,7 +80,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_ADD %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -106,7 +106,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_SUB %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -132,7 +132,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_MUL %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -158,7 +158,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_SDIV %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -184,7 +184,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_UDIV %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -210,7 +210,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_AND %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -236,7 +236,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_OR %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -262,7 +262,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_XOR %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -288,7 +288,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_LSHR %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -314,7 +314,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_ASHR %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -340,7 +340,7 @@ body: |
%1(s32) = COPY %r1
%2(s32) = G_SHL %0, %1
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -376,7 +376,7 @@ body: |
%3(s8) = G_LOAD %0 :: (load 1)
%4(s1) = G_LOAD %0 :: (load 1)
%5(p0) = G_LOAD %0 :: (load 4)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -418,7 +418,7 @@ body: |
G_STORE %5(p0), %0 :: (store 4)
%6(s64) = COPY %d6
G_STORE %6(s64), %0 :: (store 8)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -451,7 +451,7 @@ body: |
%4(p0) = G_GEP %2, %3(s32)
G_STORE %1(s32), %4(p0) :: (store 4)
- BX_RET 14, _
+ BX_RET 14, %noreg
...
---
@@ -477,7 +477,7 @@ body: |
%1(s32) = COPY %r1
%2(p0) = G_GEP %0, %1(s32)
%r0 = COPY %2(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_constants
@@ -493,7 +493,7 @@ body: |
bb.0:
%0(s32) = G_CONSTANT 42
%r0 = COPY %0(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_globals
@@ -509,7 +509,7 @@ body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @a_global
%r0 = COPY %0(p0)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_anyext_s8_32
@@ -533,7 +533,7 @@ body: |
%1(s8) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s8)
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_anyext_s16_32
@@ -557,7 +557,7 @@ body: |
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s16)
%r0 = COPY %2(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
name: test_trunc_s32_16
@@ -581,7 +581,7 @@ body: |
%2(p0) = COPY %r1
%1(s16) = G_TRUNC %0(s32)
G_STORE %1(s16), %2 :: (store 2)
- BX_RET 14, _
+ BX_RET 14, %noreg
...
---
name: test_icmp_eq_s32
@@ -609,7 +609,7 @@ body: |
%2(s1) = G_ICMP intpred(eq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -638,7 +638,7 @@ body: |
%2(s1) = G_FCMP floatpred(one), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -667,7 +667,7 @@ body: |
%2(s1) = G_FCMP floatpred(ugt), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
%r0 = COPY %3(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -699,7 +699,7 @@ body: |
%3(s1) = G_TRUNC %2(s32)
%4(s32) = G_SELECT %3(s1), %0, %1
%r0 = COPY %4(s32)
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -727,10 +727,10 @@ body: |
G_BR %bb.2
bb.1:
- BX_RET 14, _
+ BX_RET 14, %noreg
bb.2:
- BX_RET 14, _
+ BX_RET 14, %noreg
...
---
@@ -756,7 +756,7 @@ body: |
%1(s32) = COPY %s1
%2(s32) = G_FADD %0, %1
%s0 = COPY %2(s32)
- BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
...
---
@@ -782,7 +782,7 @@ body: |
%1(s64) = COPY %d1
%2(s64) = G_FADD %0, %1
%d0 = COPY %2(s64)
- BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
...
---
@@ -808,7 +808,7 @@ body: |
%1(s32) = COPY %s1
%2(s32) = G_FSUB %0, %1
%s0 = COPY %2(s32)
- BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
...
---
@@ -834,7 +834,7 @@ body: |
%1(s64) = COPY %d1
%2(s64) = G_FSUB %0, %1
%d0 = COPY %2(s64)
- BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
...
---
@@ -860,7 +860,7 @@ body: |
%1(s32) = COPY %s1
%2(s32) = G_FMUL %0, %1
%s0 = COPY %2(s32)
- BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
...
---
@@ -886,7 +886,7 @@ body: |
%1(s64) = COPY %d1
%2(s64) = G_FMUL %0, %1
%d0 = COPY %2(s64)
- BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
...
---
@@ -912,7 +912,7 @@ body: |
%1(s32) = COPY %s1
%2(s32) = G_FDIV %0, %1
%s0 = COPY %2(s32)
- BX_RET 14, _, implicit %s0
+ BX_RET 14, %noreg, implicit %s0
...
---
@@ -938,7 +938,7 @@ body: |
%1(s64) = COPY %d1
%2(s64) = G_FDIV %0, %1
%d0 = COPY %2(s64)
- BX_RET 14, _, implicit %d0
+ BX_RET 14, %noreg, implicit %d0
...
---
@@ -970,6 +970,6 @@ body: |
%3(s32), %4(s32) = G_UNMERGE_VALUES %2(s64)
%r0 = COPY %3(s32)
%r1 = COPY %4(s32)
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir b/test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir
index 448a7f86e88..99697983192 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir
@@ -33,13 +33,13 @@ body: |
; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel {{.*}}@internal_global
%1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @internal_global)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_global)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_external_global
@@ -59,13 +59,13 @@ body: |
; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel_ldr target-flags(<unknown>) @external_global :: (load 4 from got)
%1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @external_global)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_global)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_internal_constant
@@ -85,13 +85,13 @@ body: |
; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel {{.*}}@internal_constant
%1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_constant)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @internal_constant)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_constant)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_external_constant
@@ -111,11 +111,11 @@ body: |
; ELF: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel_ldr target-flags(<unknown>) @external_constant :: (load 4 from got)
%1(s32) = G_LOAD %0(p0) :: (load 4 from @external_constant)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @external_constant)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_constant)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir b/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
index e80700317e0..dc48dee00c8 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
@@ -37,19 +37,19 @@ body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
; RW-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global
- ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
; RWPI-MOVT: [[OFF:%[0-9]+]]:gpr = MOVi32imm {{.*}} @internal_global
- ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
- ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, _, _
+ ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
+ ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, %noreg, %noreg
%1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @internal_global)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_global)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_external_global
@@ -71,19 +71,19 @@ body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global
; RW-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global
- ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; RW-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
; RWPI-MOVT: [[OFF:%[0-9]+]]:gpr = MOVi32imm {{.*}} @external_global
- ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
- ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, _, _
+ ; RWPI-NOMOVT: [[OFF:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
+ ; RWPI: [[G:%[0-9]+]]:gpr = ADDrr %r9, [[OFF]], 14, %noreg, %noreg
%1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @external_global)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_global)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_internal_constant
@@ -104,16 +104,16 @@ body: |
; ROPI-MOVT: [[G:%[0-9]+]]:gpr = MOV_ga_pcrel @internal_constant
; ROPI-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel @internal_constant
; RO-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_constant
- ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
%1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_constant)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @internal_constant)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @internal_constant)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_external_constant
@@ -134,14 +134,14 @@ body: |
; ROPI-MOVT: [[G:%[0-9]+]]:gpr = MOV_ga_pcrel @external_constant
; ROPI-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_pcrel @external_constant
; RO-DEFAULT-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_constant
- ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; RO-DEFAULT-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
%1(s32) = G_LOAD %0(p0) :: (load 4 from @external_constant)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _ :: (load 4 from @external_constant)
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg :: (load 4 from @external_constant)
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir b/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
index 034b88296dc..cd03d42e4a5 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
@@ -26,18 +26,18 @@ body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
; ELF-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global
- ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
; DARWIN-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @internal_global
; DARWIN-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_abs @internal_global
%1(s32) = G_LOAD %0(p0) :: (load 4 from @internal_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
---
name: test_external_global
@@ -56,16 +56,16 @@ body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global
; ELF-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global
- ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, _ :: (load 4 from constant-pool)
+ ; ELF-NOMOVT: [[G:%[0-9]+]]:gpr = LDRi12 %const.0, 0, 14, %noreg :: (load 4 from constant-pool)
; DARWIN-MOVT: [[G:%[0-9]+]]:gpr = MOVi32imm @external_global
; DARWIN-NOMOVT: [[G:%[0-9]+]]:gpr = LDRLIT_ga_abs @external_global
%1(s32) = G_LOAD %0(p0) :: (load 4 from @external_global)
- ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, _
+ ; CHECK: [[V:%[0-9]+]]:gpr = LDRi12 [[G]], 0, 14, %noreg
%r0 = COPY %1(s32)
; CHECK: %r0 = COPY [[V]]
- BX_RET 14, _, implicit %r0
- ; CHECK: BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
+ ; CHECK: BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/a15-SD-dep.ll b/test/CodeGen/ARM/a15-SD-dep.ll
index 5e5ca4b873f..625c40eb416 100644
--- a/test/CodeGen/ARM/a15-SD-dep.ll
+++ b/test/CodeGen/ARM/a15-SD-dep.ll
@@ -114,4 +114,4 @@ sw.bb1: ; preds = %entry, %sw.bb
sw.epilog: ; preds = %entry, %sw.bb1
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/cmp1-peephole-thumb.mir b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
index 3e87ced0ee5..62675b4a77c 100644
--- a/test/CodeGen/ARM/cmp1-peephole-thumb.mir
+++ b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
@@ -49,9 +49,9 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
-# CHECK: tMOVi8 1, 14, _
-# CHECK: tMOVi8 0, 14, _
-# CHECK: tMUL %1, %0, 14, _
+# CHECK: tMOVi8 1, 14, %noreg
+# CHECK: tMOVi8 0, 14, %noreg
+# CHECK: tMUL %1, %0, 14, %noreg
# CHECK-NOT: tCMPi8
body: |
bb.0.entry:
@@ -59,10 +59,10 @@ body: |
%1 = COPY %r1
%0 = COPY %r0
- %2, %cpsr = tMUL %1, %0, 14, _
- %3, %cpsr = tMOVi8 1, 14, _
- %4, %cpsr = tMOVi8 0, 14, _
- tCMPi8 killed %2, 0, 14, _, implicit-def %cpsr
+ %2, %cpsr = tMUL %1, %0, 14, %noreg
+ %3, %cpsr = tMOVi8 1, 14, %noreg
+ %4, %cpsr = tMOVi8 0, 14, %noreg
+ tCMPi8 killed %2, 0, 14, %noreg, implicit-def %cpsr
tBcc %bb.2.entry, 0, %cpsr
bb.1.entry:
@@ -70,6 +70,6 @@ body: |
bb.2.entry:
%5 = PHI %4, %bb.1.entry, %3, %bb.0.entry
%r0 = COPY %5
- tBX_RET 14, _, implicit %r0
+ tBX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
index a31086d2113..12569b53fde 100644
--- a/test/CodeGen/ARM/cmp2-peephole-thumb.mir
+++ b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
@@ -80,24 +80,24 @@ body: |
%1 = COPY %r1
%0 = COPY %r0
- %2, %cpsr = tMUL %0, %1, 14, _
- tSTRspi %2, %stack.1.mul, 0, 14, _ :: (store 4 into %ir.mul)
- tCMPi8 %2, 0, 14, _, implicit-def %cpsr
+ %2, %cpsr = tMUL %0, %1, 14, %noreg
+ tSTRspi %2, %stack.1.mul, 0, 14, %noreg :: (store 4 into %ir.mul)
+ tCMPi8 %2, 0, 14, %noreg, implicit-def %cpsr
tBcc %bb.2.if.end, 12, %cpsr
- tB %bb.1.if.then, 14, _
+ tB %bb.1.if.then, 14, %noreg
bb.1.if.then:
- %4, %cpsr = tMOVi8 42, 14, _
- tSTRspi killed %4, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
- tB %bb.3.return, 14, _
+ %4, %cpsr = tMOVi8 42, 14, %noreg
+ tSTRspi killed %4, %stack.0.retval, 0, 14, %noreg :: (store 4 into %ir.retval)
+ tB %bb.3.return, 14, %noreg
bb.2.if.end:
- %3, %cpsr = tMOVi8 1, 14, _
- tSTRspi killed %3, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
+ %3, %cpsr = tMOVi8 1, 14, %noreg
+ tSTRspi killed %3, %stack.0.retval, 0, 14, %noreg :: (store 4 into %ir.retval)
bb.3.return:
- %5 = tLDRspi %stack.0.retval, 0, 14, _ :: (dereferenceable load 4 from %ir.retval)
+ %5 = tLDRspi %stack.0.retval, 0, 14, %noreg :: (dereferenceable load 4 from %ir.retval)
%r0 = COPY %5
- tBX_RET 14, _, implicit %r0
+ tBX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/constant-islands-cfg.mir b/test/CodeGen/ARM/constant-islands-cfg.mir
index 66d854393b5..140ef727e43 100644
--- a/test/CodeGen/ARM/constant-islands-cfg.mir
+++ b/test/CodeGen/ARM/constant-islands-cfg.mir
@@ -48,17 +48,17 @@ fixedStack:
body: |
bb.0:
liveins: %r0
- tCMPi8 killed %r0, 0, 14, _, implicit-def %cpsr
+ tCMPi8 killed %r0, 0, 14, %noreg, implicit-def %cpsr
tBcc %bb.2, 1, killed %cpsr
- tB %bb.3, 14, _
+ tB %bb.3, 14, %noreg
bb.1:
dead %r0 = SPACE 256, undef %r0
bb.2:
- tPOP_RET 14, _, def %pc
+ tPOP_RET 14, %noreg, def %pc
bb.3:
- tPOP_RET 14, _, def %pc
+ tPOP_RET 14, %noreg, def %pc
...
diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir
index a79607705c1..02105eabc6d 100644
--- a/test/CodeGen/ARM/dbg-range-extension.mir
+++ b/test/CodeGen/ARM/dbg-range-extension.mir
@@ -23,37 +23,37 @@
# CHECK: [[VAR_I:![0-9]+]] = !DILocalVariable(name: "i",
# CHECK: bb.0.entry
-# CHECK: DBG_VALUE debug-use %r0, debug-use _, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_A:%r[0-9]+]], debug-use _, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_B:%r[0-9]+]], debug-use _, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use %r0, debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_A:%r[0-9]+]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_B:%r[0-9]+]], debug-use %noreg, [[VAR_B]]
# CHECK: bb.1.if.then
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use _, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use _, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_C:%r[0-9]+]], debug-use _, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_C:%r[0-9]+]], debug-use %noreg, [[VAR_C]]
# CHECK: DBG_VALUE 1, 0, [[VAR_I]]
# CHECK: bb.2.for.body
-# CHECK: DBG_VALUE debug-use [[REG_I:%r[0-9]+]], debug-use _, [[VAR_I]]
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use _, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use _, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use _, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use _, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_I:%r[0-9]+]], debug-use %noreg, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use %noreg, [[VAR_I]]
# CHECK: bb.3.for.cond
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use _, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use _, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use _, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use _, [[VAR_I]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use %noreg, [[VAR_I]]
# CHECK: bb.4.for.cond.cleanup
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use _, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use _, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use _, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use %noreg, [[VAR_C]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
# CHECK: bb.5.if.end
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use _, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use _, [[VAR_A]]
+# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use %noreg, [[VAR_B]]
+# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use %noreg, [[VAR_A]]
--- |
; ModuleID = '/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll'
source_filename = "/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll"
@@ -211,7 +211,7 @@ body: |
bb.0.entry:
liveins: %r0, %r4, %r5, %r6, %r7, %r11, %lr
- %sp = frame-setup STMDB_UPD %sp, 14, _, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr
+ %sp = frame-setup STMDB_UPD %sp, 14, %noreg, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr
frame-setup CFI_INSTRUCTION def_cfa_offset 24
frame-setup CFI_INSTRUCTION offset %lr, -4
frame-setup CFI_INSTRUCTION offset %r11, -8
@@ -219,58 +219,58 @@ body: |
frame-setup CFI_INSTRUCTION offset %r6, -16
frame-setup CFI_INSTRUCTION offset %r5, -20
frame-setup CFI_INSTRUCTION offset %r4, -24
- DBG_VALUE debug-use %r0, debug-use _, !13, !20, debug-location !21
- %r4 = MOVr killed %r0, 14, _, _
- DBG_VALUE debug-use %r4, debug-use _, !13, !20, debug-location !21
- %r0 = MOVi 10, 14, _, _, debug-location !22
- %r1 = MOVi 11, 14, _, _, debug-location !22
+ DBG_VALUE debug-use %r0, debug-use %noreg, !13, !20, debug-location !21
+ %r4 = MOVr killed %r0, 14, %noreg, %noreg
+ DBG_VALUE debug-use %r4, debug-use %noreg, !13, !20, debug-location !21
+ %r0 = MOVi 10, 14, %noreg, _, debug-location !22
+ %r1 = MOVi 11, 14, %noreg, _, debug-location !22
BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def %r0, debug-location !22
- %r5 = MOVr killed %r0, 14, _, _, debug-location !22
- DBG_VALUE debug-use %r5, debug-use _, !14, !20, debug-location !23
- CMPri %r4, 0, 14, _, implicit-def %cpsr, debug-location !25
+ %r5 = MOVr killed %r0, 14, %noreg, _, debug-location !22
+ DBG_VALUE debug-use %r5, debug-use %noreg, !14, !20, debug-location !23
+ CMPri %r4, 0, 14, %noreg, implicit-def %cpsr, debug-location !25
Bcc %bb.5.if.end, 0, killed %cpsr
bb.1.if.then:
liveins: %r4, %r5
- %r0 = MOVi 12, 14, _, _, debug-location !26
- %r1 = MOVi 13, 14, _, _, debug-location !26
+ %r0 = MOVi 12, 14, %noreg, _, debug-location !26
+ %r1 = MOVi 13, 14, %noreg, _, debug-location !26
BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def %r0, debug-location !26
- %r6 = MOVr killed %r0, 14, _, _, debug-location !26
- DBG_VALUE debug-use %r6, debug-use _, !15, !20, debug-location !27
- %r7 = MOVi 1, 14, _, _
+ %r6 = MOVr killed %r0, 14, %noreg, _, debug-location !26
+ DBG_VALUE debug-use %r6, debug-use %noreg, !15, !20, debug-location !27
+ %r7 = MOVi 1, 14, %noreg, %noreg
DBG_VALUE 1, 0, !18, !20, debug-location !28
B %bb.3.for.cond
bb.2.for.body:
liveins: %r4, %r5, %r6, %r7
- %r1 = ADDrr %r5, %r7, 14, _, _, debug-location !36
- %r0 = MOVr %r7, 14, _, _, debug-location !36
+ %r1 = ADDrr %r5, %r7, 14, %noreg, _, debug-location !36
+ %r0 = MOVr %r7, 14, %noreg, _, debug-location !36
BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def dead %r0, debug-location !36
- %r7 = ADDri killed %r7, 1, 14, _, _, debug-location !38
- DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28
+ %r7 = ADDri killed %r7, 1, 14, %noreg, _, debug-location !38
+ DBG_VALUE debug-use %r7, debug-use %noreg, !18, !20, debug-location !28
bb.3.for.cond:
liveins: %r4, %r5, %r6, %r7
- DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28
- CMPrr %r7, %r4, 14, _, implicit-def %cpsr, debug-location !33
+ DBG_VALUE debug-use %r7, debug-use %noreg, !18, !20, debug-location !28
+ CMPrr %r7, %r4, 14, %noreg, implicit-def %cpsr, debug-location !33
Bcc %bb.2.for.body, 11, killed %cpsr, debug-location !33
bb.4.for.cond.cleanup:
liveins: %r4, %r5, %r6
- %r0 = MOVr %r5, 14, _, _, debug-location !34
- %r1 = MOVr killed %r6, 14, _, _, debug-location !34
+ %r0 = MOVr %r5, 14, %noreg, _, debug-location !34
+ %r1 = MOVr killed %r6, 14, %noreg, _, debug-location !34
BL @func2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit killed %r0, implicit killed %r1, implicit-def %sp, implicit-def dead %r0, debug-location !34
bb.5.if.end:
liveins: %r4, %r5
- %r0 = MOVr killed %r5, 14, _, _, debug-location !43
- %r1 = MOVr killed %r4, 14, _, _, debug-location !43
- %sp = LDMIA_UPD %sp, 14, _, def %r4, def %r5, def %r6, def %r7, def %r11, def %lr, debug-location !43
+ %r0 = MOVr killed %r5, 14, %noreg, _, debug-location !43
+ %r1 = MOVr killed %r4, 14, %noreg, _, debug-location !43
+ %sp = LDMIA_UPD %sp, 14, %noreg, def %r4, def %r5, def %r6, def %r7, def %r11, def %lr, debug-location !43
TAILJMPd @func2, implicit %sp, implicit %sp, implicit killed %r0, implicit killed %r1, debug-location !43
...
diff --git a/test/CodeGen/ARM/expand-pseudos.mir b/test/CodeGen/ARM/expand-pseudos.mir
index 1cc46bc0f55..b35c2dce66d 100644
--- a/test/CodeGen/ARM/expand-pseudos.mir
+++ b/test/CodeGen/ARM/expand-pseudos.mir
@@ -25,11 +25,11 @@ body: |
bb.0.entry:
liveins: %r0
- %r1 = MOVi 2, 14, _, _
- CMPri killed %r0, 0, 14, _, implicit-def %cpsr
+ %r1 = MOVi 2, 14, %noreg, %noreg
+ CMPri killed %r0, 0, 14, %noreg, implicit-def %cpsr
%r1 = MOVCCi16 killed %r1, 500, 0, killed %cpsr
- %r0 = MOVr killed %r1, 14, _, _
- BX_RET 14, _, implicit %r0
+ %r0 = MOVr killed %r1, 14, %noreg, %noreg
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -42,11 +42,11 @@ body: |
bb.0.entry:
liveins: %r0
- %r1 = MOVi 2, 14, _, _
- CMPri killed %r0, 0, 14, _, implicit-def %cpsr
+ %r1 = MOVi 2, 14, %noreg, %noreg
+ CMPri killed %r0, 0, 14, %noreg, implicit-def %cpsr
%r1 = MOVCCi32imm killed %r1, 500500500, 0, killed %cpsr
- %r0 = MOVr killed %r1, 14, _, _
- BX_RET 14, _, implicit %r0
+ %r0 = MOVr killed %r1, 14, %noreg, %noreg
+ BX_RET 14, %noreg, implicit %r0
...
---
@@ -60,9 +60,9 @@ body: |
bb.0.entry:
liveins: %r0, %r1
- CMPri %r1, 500, 14, _, implicit-def %cpsr
+ CMPri %r1, 500, 14, %noreg, implicit-def %cpsr
%r0 = MOVCCr killed %r0, killed %r1, 12, killed %cpsr
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
@@ -72,4 +72,4 @@ body: |
# CHECK: %r1 = MOVi16 2068, 0, %cpsr, implicit killed %r1
# CHECK: %r1 = MOVTi16 %r1, 7637, 0, %cpsr
# CHECK-LABEL: name: test3
-# CHECK: %r0 = MOVr killed %r1, 12, killed %cpsr, _, implicit killed %r0
+# CHECK: %r0 = MOVr killed %r1, 12, killed %cpsr, %noreg, implicit killed %r0
diff --git a/test/CodeGen/ARM/fpoffset_overflow.mir b/test/CodeGen/ARM/fpoffset_overflow.mir
index 4f3524bf7d1..59d981a436e 100644
--- a/test/CodeGen/ARM/fpoffset_overflow.mir
+++ b/test/CodeGen/ARM/fpoffset_overflow.mir
@@ -3,10 +3,10 @@
# This should trigger an emergency spill in the register scavenger because the
# frame offset into the large argument is too large.
# CHECK-LABEL: name: func0
-# CHECK: t2STRi12 killed [[SPILLED:%r[0-9]+]], %sp, 0, 14, _ :: (store 4 into %stack.0)
-# CHECK: [[SPILLED]] = t2ADDri killed %sp, 4096, 14, _, _
-# CHECK: %sp = t2LDRi12 killed [[SPILLED]], 40, 14, _ :: (load 4)
-# CHECK: [[SPILLED]] = t2LDRi12 %sp, 0, 14, _ :: (load 4 from %stack.0)
+# CHECK: t2STRi12 killed [[SPILLED:%r[0-9]+]], %sp, 0, 14, %noreg :: (store 4 into %stack.0)
+# CHECK: [[SPILLED]] = t2ADDri killed %sp, 4096, 14, %noreg, %noreg
+# CHECK: %sp = t2LDRi12 killed [[SPILLED]], 40, 14, %noreg :: (load 4)
+# CHECK: [[SPILLED]] = t2LDRi12 %sp, 0, 14, %noreg :: (load 4 from %stack.0)
name: func0
tracksRegLiveness: true
fixedStack:
@@ -31,7 +31,7 @@ body: |
%r12 = IMPLICIT_DEF
%lr = IMPLICIT_DEF
- %sp = t2LDRi12 %fixed-stack.0, 0, 14, _ :: (load 4)
+ %sp = t2LDRi12 %fixed-stack.0, 0, 14, %noreg :: (load 4)
KILL %r0
KILL %r1
@@ -53,7 +53,7 @@ body: |
# CHECK-LABEL: name: func1
# CHECK-NOT: t2STRi12
# CHECK-NOT: t2ADDri
-# CHECK: %r11 = t2LDRi12 %sp, 4092, 14, _ :: (load 4)
+# CHECK: %r11 = t2LDRi12 %sp, 4092, 14, %noreg :: (load 4)
# CHECK-NOT: t2LDRi12
name: func1
tracksRegLiveness: true
@@ -78,7 +78,7 @@ body: |
%r12 = IMPLICIT_DEF
%lr = IMPLICIT_DEF
- %r11 = t2LDRi12 %fixed-stack.0, 0, 14, _ :: (load 4)
+ %r11 = t2LDRi12 %fixed-stack.0, 0, 14, %noreg :: (load 4)
KILL %r0
KILL %r1
diff --git a/test/CodeGen/ARM/imm-peephole-arm.mir b/test/CodeGen/ARM/imm-peephole-arm.mir
index 95ae58ff9bd..0457507eb44 100644
--- a/test/CodeGen/ARM/imm-peephole-arm.mir
+++ b/test/CodeGen/ARM/imm-peephole-arm.mir
@@ -42,18 +42,18 @@ body: |
%0 = COPY %r0
%1 = MOVi32imm -25733
- %2 = SUBrr %0, killed %1, 14, _, _
+ %2 = SUBrr %0, killed %1, 14, %noreg, %noreg
%3 = MOVi32imm 25733
- %4 = SUBrr %0, killed %3, 14, _, _
+ %4 = SUBrr %0, killed %3, 14, %noreg, %noreg
%5 = MOVi32imm -25733
- %6 = ADDrr %0, killed %5, 14, _, _
+ %6 = ADDrr %0, killed %5, 14, %noreg, %noreg
%7 = MOVi32imm 25733
- %8 = ADDrr killed %0, killed %7, 14, _, _
+ %8 = ADDrr killed %0, killed %7, 14, %noreg, %noreg
%r0 = COPY killed %8
- BX_RET 14, _, implicit %r0
+ BX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/imm-peephole-thumb.mir b/test/CodeGen/ARM/imm-peephole-thumb.mir
index 553717ba74a..04e2b193e96 100644
--- a/test/CodeGen/ARM/imm-peephole-thumb.mir
+++ b/test/CodeGen/ARM/imm-peephole-thumb.mir
@@ -41,18 +41,18 @@ body: |
liveins: %r0
%0 = COPY %r0
%1 = t2MOVi32imm -25733
- %2 = t2SUBrr %0, killed %1, 14, _, _
+ %2 = t2SUBrr %0, killed %1, 14, %noreg, %noreg
%3 = t2MOVi32imm 25733
- %4 = t2SUBrr %0, killed %3, 14, _, _
+ %4 = t2SUBrr %0, killed %3, 14, %noreg, %noreg
%5 = t2MOVi32imm -25733
- %6= t2ADDrr %0, killed %5, 14, _, _
+ %6= t2ADDrr %0, killed %5, 14, %noreg, %noreg
%7 = t2MOVi32imm 25733
- %8 = t2ADDrr killed %0, killed %7, 14, _, _
+ %8 = t2ADDrr killed %0, killed %7, 14, %noreg, %noreg
%r0 = COPY killed %8
- tBX_RET 14, _, implicit %r0
+ tBX_RET 14, %noreg, implicit %r0
...
diff --git a/test/CodeGen/ARM/indirect-hidden.ll b/test/CodeGen/ARM/indirect-hidden.ll
index ae1c505bb68..eb030283487 100644
--- a/test/CodeGen/ARM/indirect-hidden.ll
+++ b/test/CodeGen/ARM/indirect-hidden.ll
@@ -19,4 +19,4 @@ define i32* @get_var_hidden() {
; CHECK-NOT: __DATA,__data
; CHECK: .indirect_symbol _var_hidden
-; CHECK-NEXT: .long 0 \ No newline at end of file
+; CHECK-NEXT: .long 0
diff --git a/test/CodeGen/ARM/litpool-licm.ll b/test/CodeGen/ARM/litpool-licm.ll
index dc6b37feaf0..923971d1afe 100644
--- a/test/CodeGen/ARM/litpool-licm.ll
+++ b/test/CodeGen/ARM/litpool-licm.ll
@@ -43,4 +43,4 @@ done:
ret void
}
-declare void @foo(i32*) \ No newline at end of file
+declare void @foo(i32*)
diff --git a/test/CodeGen/ARM/load_store_opt_kill.mir b/test/CodeGen/ARM/load_store_opt_kill.mir
index 4c210eaf8e9..85cc5953d1d 100644
--- a/test/CodeGen/ARM/load_store_opt_kill.mir
+++ b/test/CodeGen/ARM/load_store_opt_kill.mir
@@ -3,8 +3,8 @@
# CHECK-LABEL: name: f
name: f
# Make sure the load into %r0 doesn't clobber the base register before the second load uses it.
-# CHECK: %r3 = LDRi12 %r0, 12, 14, _
-# CHECK-NEXT: %r0 = LDRi12 %r0, 8, 14, _
+# CHECK: %r3 = LDRi12 %r0, 12, 14, %noreg
+# CHECK-NEXT: %r0 = LDRi12 %r0, 8, 14, %noreg
body: |
bb.0:
liveins: %r0, %r3
diff --git a/test/CodeGen/ARM/local-call.ll b/test/CodeGen/ARM/local-call.ll
index a38df62ff90..c07294685e9 100644
--- a/test/CodeGen/ARM/local-call.ll
+++ b/test/CodeGen/ARM/local-call.ll
@@ -17,4 +17,4 @@ define i64 @test_local_call(i64 %a, i64 %b) {
%res = udiv i64 %a, %b
ret i64 %res
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/machine-copyprop.mir b/test/CodeGen/ARM/machine-copyprop.mir
index 9be595f690d..bb9c3478d8b 100644
--- a/test/CodeGen/ARM/machine-copyprop.mir
+++ b/test/CodeGen/ARM/machine-copyprop.mir
@@ -3,20 +3,20 @@
# Test that machine copy prop recognizes the implicit-def operands on a COPY
# as clobbering the register.
# CHECK-LABEL: name: func
-# CHECK: %d2 = VMOVv2i32 2, 14, _
+# CHECK: %d2 = VMOVv2i32 2, 14, %noreg
# CHECK: %s5 = COPY %s0, implicit %q1, implicit-def %q1
-# CHECK: VST1q32 %r0, 0, %q1, 14, _
+# CHECK: VST1q32 %r0, 0, %q1, 14, %noreg
# The following two COPYs must not be removed
# CHECK: %s4 = COPY %s20, implicit-def %q1
# CHECK: %s5 = COPY %s0, implicit killed %d0, implicit %q1, implicit-def %q1
-# CHECK: VST1q32 %r2, 0, %q1, 14, _
+# CHECK: VST1q32 %r2, 0, %q1, 14, %noreg
name: func
body: |
bb.0:
- %d2 = VMOVv2i32 2, 14, _
+ %d2 = VMOVv2i32 2, 14, %noreg
%s5 = COPY %s0, implicit %q1, implicit-def %q1
- VST1q32 %r0, 0, %q1, 14, _
+ VST1q32 %r0, 0, %q1, 14, %noreg
%s4 = COPY %s20, implicit-def %q1
%s5 = COPY %s0, implicit killed %d0, implicit %q1, implicit-def %q1
- VST1q32 %r2, 0, %q1, 14, _
+ VST1q32 %r2, 0, %q1, 14, %noreg
...
diff --git a/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 9c34e8e6ecc..4048f1122d1 100644
--- a/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -152,24 +152,24 @@ body: |
%1 = COPY %r1
%0 = COPY %r0
%2 = t2MOVi32imm @g1
- %3 = t2LDRi12 %2, 0, 14, _ :: (dereferenceable load 4 from @g1)
+ %3 = t2LDRi12 %2, 0, 14, %noreg :: (dereferenceable load 4 from @g1)
%4 = t2MOVi32imm @g2
- %5 = t2LDRi12 %4, 0, 14, _ :: (dereferenceable load 4 from @g2)
- %6 = t2ADDrr %3, %3, 14, _, _
- %7 = t2SDIV %6, %5, 14, _
- t2STRi12 %7, %2, 0, 14, _ :: (store 4 into @g1)
- %8 = t2SMULBB %1, %1, 14, _
- %9 = t2SMLABB %0, %0, %8, 14, _
- %10 = t2UXTH %9, 0, 14, _
- %11 = t2MUL %10, %7, 14, _
- %12 = t2MLA %11, %11, %11, 14, _
- %13, %14 = t2UMULL %12, %12, 14, _
- %19, %16 = t2UMULL %13, %13, 14, _
- %17 = t2MLA %13, %14, %16, 14, _
- %20 = t2MLA %13, %14, %17, 14, _
- %19, %20 = t2UMLAL %12, %12, %19, %20, 14, _
+ %5 = t2LDRi12 %4, 0, 14, %noreg :: (dereferenceable load 4 from @g2)
+ %6 = t2ADDrr %3, %3, 14, %noreg, %noreg
+ %7 = t2SDIV %6, %5, 14, %noreg
+ t2STRi12 %7, %2, 0, 14, %noreg :: (store 4 into @g1)
+ %8 = t2SMULBB %1, %1, 14, %noreg
+ %9 = t2SMLABB %0, %0, %8, 14, %noreg
+ %10 = t2UXTH %9, 0, 14, %noreg
+ %11 = t2MUL %10, %7, 14, %noreg
+ %12 = t2MLA %11, %11, %11, 14, %noreg
+ %13, %14 = t2UMULL %12, %12, 14, %noreg
+ %19, %16 = t2UMULL %13, %13, 14, %noreg
+ %17 = t2MLA %13, %14, %16, 14, %noreg
+ %20 = t2MLA %13, %14, %17, 14, %noreg
+ %19, %20 = t2UMLAL %12, %12, %19, %20, 14, %noreg
%r0 = COPY %19
%r1 = COPY %20
- tBX_RET 14, _, implicit %r0, implicit %r1
+ tBX_RET 14, %noreg, implicit %r0, implicit %r1
...
diff --git a/test/CodeGen/ARM/misched-int-basic.mir b/test/CodeGen/ARM/misched-int-basic.mir
index b5d61dfca18..0cad54d975e 100644
--- a/test/CodeGen/ARM/misched-int-basic.mir
+++ b/test/CodeGen/ARM/misched-int-basic.mir
@@ -111,18 +111,18 @@ body: |
%1 = COPY %r1
%0 = COPY %r0
- %2 = SMULBB %1, %1, 14, _
- %3 = SMLABB %0, %0, %2, 14, _
- %4 = UXTH %3, 0, 14, _
- %5 = MUL %4, %4, 14, _, _
- %6 = MLA %5, %5, %5, 14, _, _
- %7, %8 = UMULL %6, %6, 14, _, _
- %13, %10 = UMULL %7, %7, 14, _, _
- %11 = MLA %7, %8, %10, 14, _, _
- %14 = MLA %7, %8, %11, 14, _, _
- %13, %14 = UMLAL %6, %6, %13, %14, 14, _, _
+ %2 = SMULBB %1, %1, 14, %noreg
+ %3 = SMLABB %0, %0, %2, 14, %noreg
+ %4 = UXTH %3, 0, 14, %noreg
+ %5 = MUL %4, %4, 14, %noreg, %noreg
+ %6 = MLA %5, %5, %5, 14, %noreg, %noreg
+ %7, %8 = UMULL %6, %6, 14, %noreg, %noreg
+ %13, %10 = UMULL %7, %7, 14, %noreg, %noreg
+ %11 = MLA %7, %8, %10, 14, %noreg, %noreg
+ %14 = MLA %7, %8, %11, 14, %noreg, %noreg
+ %13, %14 = UMLAL %6, %6, %13, %14, 14, %noreg, %noreg
%r0 = COPY %13
%r1 = COPY %14
- BX_RET 14, _, implicit %r0, implicit %r1
+ BX_RET 14, %noreg, implicit %r0, implicit %r1
...
diff --git a/test/CodeGen/ARM/pei-swiftself.mir b/test/CodeGen/ARM/pei-swiftself.mir
index 055efeea328..d2d3469458b 100644
--- a/test/CodeGen/ARM/pei-swiftself.mir
+++ b/test/CodeGen/ARM/pei-swiftself.mir
@@ -39,7 +39,7 @@ body: |
; not just use %r10 for that.
; CHECK-NOT: STRi12 %1,{{.*}}%r10
- STRi12 %r1, %stack.0, 0, 14, _ :: (store 4)
+ STRi12 %r1, %stack.0, 0, 14, %noreg :: (store 4)
; use the swiftself parameter value.
KILL %r10
diff --git a/test/CodeGen/ARM/pr25317.ll b/test/CodeGen/ARM/pr25317.ll
index 6770c6f84ec..679b5a0299a 100644
--- a/test/CodeGen/ARM/pr25317.ll
+++ b/test/CodeGen/ARM/pr25317.ll
@@ -8,4 +8,4 @@ target triple = "armv7--linux-gnueabihf"
define void @f(i32* %p) {
call void asm sideeffect "str lr, $0", "=*o"(i32* %p)
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/preferred-align.ll b/test/CodeGen/ARM/preferred-align.ll
index a9a17229e06..26dbb1cbd54 100644
--- a/test/CodeGen/ARM/preferred-align.ll
+++ b/test/CodeGen/ARM/preferred-align.ll
@@ -18,4 +18,4 @@
@var16 = global i16 zeroinitializer
; CHECK: .globl var16
-; CHECK-NEXT: .p2align 1 \ No newline at end of file
+; CHECK-NEXT: .p2align 1
diff --git a/test/CodeGen/ARM/prera-ldst-aliasing.mir b/test/CodeGen/ARM/prera-ldst-aliasing.mir
index ce37106ed8d..cc320086079 100644
--- a/test/CodeGen/ARM/prera-ldst-aliasing.mir
+++ b/test/CodeGen/ARM/prera-ldst-aliasing.mir
@@ -26,15 +26,15 @@ body: |
%1 : gpr = COPY %r1
%0 : gpr = COPY %r0
- %2 : gpr = t2LDRi12 %1, 0, 14, _ :: (load 4 from %ir.y)
- t2STRi12 killed %2, %0, 0, 14, _ :: (store 4 into %ir.x)
- %3 : gpr = t2LDRi12 %1, 4, 14, _ :: (load 4 from %ir.arrayidx2)
- t2STRi12 killed %3, %0, 4, 14, _ :: (store 4 into %ir.arrayidx3)
+ %2 : gpr = t2LDRi12 %1, 0, 14, %noreg :: (load 4 from %ir.y)
+ t2STRi12 killed %2, %0, 0, 14, %noreg :: (store 4 into %ir.x)
+ %3 : gpr = t2LDRi12 %1, 4, 14, %noreg :: (load 4 from %ir.arrayidx2)
+ t2STRi12 killed %3, %0, 4, 14, %noreg :: (store 4 into %ir.arrayidx3)
; CHECK: t2LDRi12
; CHECK-NEXT: t2LDRi12
; CHECK-NEXT: t2STRi12
; CHECK-NEXT: t2STRi12
- tBX_RET 14, _
+ tBX_RET 14, %noreg
...
diff --git a/test/CodeGen/ARM/prera-ldst-insertpt.mir b/test/CodeGen/ARM/prera-ldst-insertpt.mir
index eafcc7c36d3..c0202eb84fa 100644
--- a/test/CodeGen/ARM/prera-ldst-insertpt.mir
+++ b/test/CodeGen/ARM/prera-ldst-insertpt.mir
@@ -28,14 +28,14 @@ body: |
%2 : rgpr = COPY %r2
%1 : rgpr = COPY %r1
%0 : gpr = COPY %r0
- %3 : rgpr = t2MUL %2, %2, 14, _
- %4 : rgpr = t2MUL %1, %1, 14, _
+ %3 : rgpr = t2MUL %2, %2, 14, %noreg
+ %4 : rgpr = t2MUL %1, %1, 14, %noreg
%5 : rgpr = t2MOVi32imm -858993459
- %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, _
- %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, _
- t2STRi12 %1, %0, 0, 14, _ :: (store 4)
- %10 : rgpr = t2LSLri %2, 1, 14, _, _
- t2STRi12 killed %10, %0, 4, 14, _ :: (store 4)
+ %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, %noreg
+ %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, %noreg
+ t2STRi12 %1, %0, 0, 14, %noreg :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg
+ t2STRi12 killed %10, %0, 4, 14, %noreg :: (store 4)
; Make sure we move the paired stores next to each other, and
; insert them in an appropriate location.
@@ -44,17 +44,17 @@ body: |
; CHECK-NEXT: t2MOVi
; CHECK-NEXT: t2ADDrs
- %11 : rgpr = t2MOVi 55, 14, _, _
- %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, _, _
- t2STRi12 killed %12, %0, 16, 14, _ :: (store 4)
- %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, _, _
- t2STRi12 killed %13, %0, 20, 14, _ :: (store 4)
+ %11 : rgpr = t2MOVi 55, 14, %noreg, %noreg
+ %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, %noreg, %noreg
+ t2STRi12 killed %12, %0, 16, 14, %noreg :: (store 4)
+ %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, %noreg, %noreg
+ t2STRi12 killed %13, %0, 20, 14, %noreg :: (store 4)
; Make sure we move the paired stores next to each other.
; CHECK: t2STRi12 killed %12,
; CHECK-NEXT: t2STRi12 killed %13,
- tBX_RET 14, _
+ tBX_RET 14, %noreg
---
# CHECK-LABEL: name: b
name: b
@@ -71,11 +71,11 @@ body: |
%2 : rgpr = COPY %r2
%1 : rgpr = COPY %r1
%0 : gpr = COPY %r0
- t2STRi12 %1, %0, 0, 14, _ :: (store 4)
- %10 : rgpr = t2LSLri %2, 1, 14, _, _
- t2STRi12 killed %10, %0, 4, 14, _ :: (store 4)
- %3 : rgpr = t2MUL %2, %2, 14, _
- t2STRi12 %3, %0, 8, 14, _ :: (store 4)
+ t2STRi12 %1, %0, 0, 14, %noreg :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg
+ t2STRi12 killed %10, %0, 4, 14, %noreg :: (store 4)
+ %3 : rgpr = t2MUL %2, %2, 14, %noreg
+ t2STRi12 %3, %0, 8, 14, %noreg :: (store 4)
; Make sure we move the paired stores next to each other, and
; insert them in an appropriate location.
@@ -85,21 +85,21 @@ body: |
; CHECK-NEXT: t2MUL
; CHECK-NEXT: t2MOVi32imm
- %4 : rgpr = t2MUL %1, %1, 14, _
+ %4 : rgpr = t2MUL %1, %1, 14, %noreg
%5 : rgpr = t2MOVi32imm -858993459
- %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, _
- %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, _
- %10 : rgpr = t2LSLri %2, 1, 14, _, _
- %11 : rgpr = t2MOVi 55, 14, _, _
- %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, _, _
- t2STRi12 killed %12, %0, 16, 14, _ :: (store 4)
- %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, _, _
- t2STRi12 killed %13, %0, 20, 14, _ :: (store 4)
+ %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, %noreg
+ %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, %noreg
+ %10 : rgpr = t2LSLri %2, 1, 14, %noreg, %noreg
+ %11 : rgpr = t2MOVi 55, 14, %noreg, %noreg
+ %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, %noreg, %noreg
+ t2STRi12 killed %12, %0, 16, 14, %noreg :: (store 4)
+ %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, %noreg, %noreg
+ t2STRi12 killed %13, %0, 20, 14, %noreg :: (store 4)
; Make sure we move the paired stores next to each other.
; CHECK: t2STRi12 {{.*}}, 16
; CHECK-NEXT: t2STRi12 {{.*}}, 20
- tBX_RET 14, _
+ tBX_RET 14, %noreg
...
diff --git a/test/CodeGen/ARM/scavenging.mir b/test/CodeGen/ARM/scavenging.mir
index dfd02fbee75..c7fb7b3e86c 100644
--- a/test/CodeGen/ARM/scavenging.mir
+++ b/test/CodeGen/ARM/scavenging.mir
@@ -25,36 +25,36 @@ body: |
%r7 = IMPLICIT_DEF
%0 : tgpr = IMPLICIT_DEF
- %0 = tADDhirr %0, %sp, 14, _
- tSTRi %r0, %0, 0, 14, _
+ %0 = tADDhirr %0, %sp, 14, %noreg
+ tSTRi %r0, %0, 0, 14, %noreg
%1 : tgpr = IMPLICIT_DEF
- %1 = tADDhirr %1, %sp, 14, _
- tSTRi %r1, %1, 0, 14, _
+ %1 = tADDhirr %1, %sp, 14, %noreg
+ tSTRi %r1, %1, 0, 14, %noreg
%2 : tgpr = IMPLICIT_DEF
- %2 = tADDhirr %2, %sp, 14, _
- tSTRi %r2, %2, 0, 14, _
+ %2 = tADDhirr %2, %sp, 14, %noreg
+ tSTRi %r2, %2, 0, 14, %noreg
%3 : tgpr = IMPLICIT_DEF
- %3 = tADDhirr %3, %sp, 14, _
- tSTRi %r3, %3, 0, 14, _
+ %3 = tADDhirr %3, %sp, 14, %noreg
+ tSTRi %r3, %3, 0, 14, %noreg
%4 : tgpr = IMPLICIT_DEF
- %4 = tADDhirr %4, %sp, 14, _
- tSTRi %r4, %4, 0, 14, _
+ %4 = tADDhirr %4, %sp, 14, %noreg
+ tSTRi %r4, %4, 0, 14, %noreg
%5 : tgpr = IMPLICIT_DEF
- %5 = tADDhirr %5, %sp, 14, _
- tSTRi %r5, %5, 0, 14, _
+ %5 = tADDhirr %5, %sp, 14, %noreg
+ tSTRi %r5, %5, 0, 14, %noreg
%6 : tgpr = IMPLICIT_DEF
- %6 = tADDhirr %6, %sp, 14, _
- tSTRi %r6, %6, 0, 14, _
+ %6 = tADDhirr %6, %sp, 14, %noreg
+ tSTRi %r6, %6, 0, 14, %noreg
%7 : tgpr = IMPLICIT_DEF
- %7 = tADDhirr %7, %sp, 14, _
- tSTRi %r7, %7, 0, 14, _
+ %7 = tADDhirr %7, %sp, 14, %noreg
+ tSTRi %r7, %7, 0, 14, %noreg
KILL %r0
KILL %r1
diff --git a/test/CodeGen/ARM/sched-it-debug-nodes.mir b/test/CodeGen/ARM/sched-it-debug-nodes.mir
index 2d4fff16067..d88a57dccca 100644
--- a/test/CodeGen/ARM/sched-it-debug-nodes.mir
+++ b/test/CodeGen/ARM/sched-it-debug-nodes.mir
@@ -131,27 +131,27 @@ body: |
bb.0.entry:
liveins: %r0, %r1, %r2, %r3, %lr, %r7
- DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- DBG_VALUE debug-use %r2, debug-use _, !20, !27, debug-location !28
- DBG_VALUE debug-use %r3, debug-use _, !21, !27, debug-location !28
- t2CMPri %r3, 4, 14, _, implicit-def %cpsr, debug-location !31
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- %r0 = t2MOVi -1, 3, %cpsr, _, implicit undef %r0
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28
+ DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28
+ t2CMPri %r3, 4, 14, %noreg, implicit-def %cpsr, debug-location !31
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ %r0 = t2MOVi -1, 3, %cpsr, %noreg, implicit undef %r0
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
tBX_RET 3, %cpsr, implicit %r0, debug-location !34
- %sp = frame-setup t2STMDB_UPD %sp, 14, _, killed %r7, killed %lr
+ %sp = frame-setup t2STMDB_UPD %sp, 14, %noreg, killed %r7, killed %lr
frame-setup CFI_INSTRUCTION def_cfa_offset 8
frame-setup CFI_INSTRUCTION offset %lr, -4
frame-setup CFI_INSTRUCTION offset %r7, -8
- DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28
- DBG_VALUE debug-use %r1, debug-use _, !19, !27, debug-location !28
- DBG_VALUE debug-use %r2, debug-use _, !20, !27, debug-location !28
- DBG_VALUE debug-use %r3, debug-use _, !21, !27, debug-location !28
- %r1 = tMOVr killed %r2, 14, _, debug-location !32
- %r2 = tMOVr killed %r3, 14, _, debug-location !32
- tBL 14, _, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32
- %r0 = t2MOVi 0, 14, _, _
- %sp = t2LDMIA_RET %sp, 14, _, def %r7, def %pc, implicit %r0
+ DBG_VALUE debug-use %r0, debug-use %noreg, !18, !27, debug-location !28
+ DBG_VALUE debug-use %r1, debug-use %noreg, !19, !27, debug-location !28
+ DBG_VALUE debug-use %r2, debug-use %noreg, !20, !27, debug-location !28
+ DBG_VALUE debug-use %r3, debug-use %noreg, !21, !27, debug-location !28
+ %r1 = tMOVr killed %r2, 14, %noreg, debug-location !32
+ %r2 = tMOVr killed %r3, 14, %noreg, debug-location !32
+ tBL 14, %noreg, @g, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit-def %sp, debug-location !32
+ %r0 = t2MOVi 0, 14, %noreg, %noreg
+ %sp = t2LDMIA_RET %sp, 14, %noreg, def %r7, def %pc, implicit %r0
...
diff --git a/test/CodeGen/ARM/single-issue-r52.mir b/test/CodeGen/ARM/single-issue-r52.mir
index 8dfc5df1dec..b9857a18299 100644
--- a/test/CodeGen/ARM/single-issue-r52.mir
+++ b/test/CodeGen/ARM/single-issue-r52.mir
@@ -76,11 +76,11 @@ body: |
liveins: %r0
%0 = COPY %r0
- %1 = VLD4d8Pseudo %0, 8, 14, _ :: (load 32 from %ir.A, align 8)
- %4 = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, _
- %5, %6 = VMOVRRD %4, 14, _
+ %1 = VLD4d8Pseudo %0, 8, 14, %noreg :: (load 32 from %ir.A, align 8)
+ %4 = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, %noreg
+ %5, %6 = VMOVRRD %4, 14, %noreg
%r0 = COPY %5
%r1 = COPY %6
- BX_RET 14, _, implicit %r0, implicit killed %r1
+ BX_RET 14, %noreg, implicit %r0, implicit killed %r1
...
diff --git a/test/CodeGen/ARM/tail-dup-bundle.mir b/test/CodeGen/ARM/tail-dup-bundle.mir
index 67c1cb5a6b9..719d616f26b 100644
--- a/test/CodeGen/ARM/tail-dup-bundle.mir
+++ b/test/CodeGen/ARM/tail-dup-bundle.mir
@@ -19,7 +19,7 @@ body: |
bb.1:
liveins: %r0
- t2CMPri %r0, 32, 14, _, implicit-def %cpsr
+ t2CMPri %r0, 32, 14, %noreg, implicit-def %cpsr
BUNDLE implicit-def dead %itstate, implicit-def %cpsr, implicit killed %r0, implicit killed %cpsr {
t2IT 1, 24, implicit-def %itstate
t2CMPri killed %r0, 9, 1, killed %cpsr, implicit-def %cpsr, implicit internal killed %itstate
@@ -28,9 +28,9 @@ body: |
bb.2:
%r0 = IMPLICIT_DEF
- t2B %bb.1, 14, _
+ t2B %bb.1, 14, %noreg
bb.3:
%r0 = IMPLICIT_DEF
- t2B %bb.1, 14, _
+ t2B %bb.1, 14, %noreg
...
diff --git a/test/CodeGen/ARM/thumb-litpool.ll b/test/CodeGen/ARM/thumb-litpool.ll
index f68fdb6fdc0..bd8829c22bc 100644
--- a/test/CodeGen/ARM/thumb-litpool.ll
+++ b/test/CodeGen/ARM/thumb-litpool.ll
@@ -12,4 +12,4 @@ define void @foo() minsize {
call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7}"()
call void @callee(i8* @var)
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/v6-jumptable-clobber.mir b/test/CodeGen/ARM/v6-jumptable-clobber.mir
index ba25ac2cfe4..52a39ffc5e1 100644
--- a/test/CodeGen/ARM/v6-jumptable-clobber.mir
+++ b/test/CodeGen/ARM/v6-jumptable-clobber.mir
@@ -231,21 +231,21 @@ body: |
successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c)
liveins: %r0, %r1
- %r2 = tLDRpci %const.0, 14, _
- tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.addr)
+ %r2 = tLDRpci %const.0, 14, %noreg
+ tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.addr)
dead %r1 = SPACE 980, undef %r0
- %r0 = tUXTB killed %r0, 14, _
- %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, _
- tCMPi8 %r1, 25, 14, _, implicit-def %cpsr
+ %r0 = tUXTB killed %r0, 14, %noreg
+ %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, %noreg
+ tCMPi8 %r1, 25, 14, %noreg, implicit-def %cpsr
tBcc %bb.2.d1, 8, killed %cpsr
bb.1 (%ir-block.0):
successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2)
liveins: %r1
- %r0, dead %cpsr = tLSLri killed %r1, 2, 14, _
- %r1 = tLEApcrelJT %jump-table.0, 14, _
- %r0 = tLDRr killed %r1, killed %r0, 14, _ :: (load 4 from jump-table)
+ %r0, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg
+ %r1 = tLEApcrelJT %jump-table.0, 14, %noreg
+ %r0 = tLDRr killed %r1, killed %r0, 14, %noreg :: (load 4 from jump-table)
tBR_JTr killed %r0, %jump-table.0
bb.3.d2:
@@ -329,20 +329,20 @@ body: |
successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c)
liveins: %r0, %r1
- %r2 = tLDRpci %const.0, 14, _
- tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.addr)
- %r0 = tUXTB killed %r0, 14, _
- %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, _
- tCMPi8 %r1, 25, 14, _, implicit-def %cpsr
+ %r2 = tLDRpci %const.0, 14, %noreg
+ tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.addr)
+ %r0 = tUXTB killed %r0, 14, %noreg
+ %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, %noreg
+ tCMPi8 %r1, 25, 14, %noreg, implicit-def %cpsr
tBcc %bb.2.d1, 8, killed %cpsr
bb.1 (%ir-block.0):
successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2)
liveins: %r1
- %r0, dead %cpsr = tLSLri killed %r1, 2, 14, _
- %r1 = tLEApcrelJT %jump-table.0, 14, _
- %r0 = tLDRr killed %r1, killed %r0, 14, _ :: (load 4 from jump-table)
+ %r0, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg
+ %r1 = tLEApcrelJT %jump-table.0, 14, %noreg
+ %r0 = tLDRr killed %r1, killed %r0, 14, %noreg :: (load 4 from jump-table)
tBR_JTr killed %r0, %jump-table.0
bb.3.d2:
diff --git a/test/CodeGen/ARM/vcvt_combine.ll b/test/CodeGen/ARM/vcvt_combine.ll
index 11bed5a1a47..326c5f761a9 100644
--- a/test/CodeGen/ARM/vcvt_combine.ll
+++ b/test/CodeGen/ARM/vcvt_combine.ll
@@ -69,4 +69,4 @@ define <3 x i32> @test_illegal_fp_to_int(<3 x float> %in) {
%scale = fmul <3 x float> %in, <float 4.0, float 4.0, float 4.0>
%val = fptosi <3 x float> %scale to <3 x i32>
ret <3 x i32> %val
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/vdiv_combine.ll b/test/CodeGen/ARM/vdiv_combine.ll
index 4a6c36b4277..d88fe31a59d 100644
--- a/test/CodeGen/ARM/vdiv_combine.ll
+++ b/test/CodeGen/ARM/vdiv_combine.ll
@@ -160,4 +160,4 @@ define <3 x float> @test_illegal_int_to_fp(<3 x i32> %in) {
%conv = sitofp <3 x i32> %in to <3 x float>
%res = fdiv <3 x float> %conv, <float 4.0, float 4.0, float 4.0>
ret <3 x float> %res
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/virtregrewriter-subregliveness.mir b/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
index 83335a3ccff..44bc856c914 100644
--- a/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
+++ b/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
@@ -33,7 +33,7 @@ body: |
; CHECK-NEXT: %r1 = KILL %r1, implicit killed %r0_r1
undef %0.gsub_0 = COPY %r0
%0.gsub_1 = COPY %r1
- tBX_RET 14, _, implicit %0
+ tBX_RET 14, %noreg, implicit %0
...
@@ -55,7 +55,7 @@ body: |
; CHECK: %r0 = KILL %r0, implicit-def %r0_r1
; CHECK-NEXT: tBX_RET
undef %0.gsub_0 = COPY %r0
- tBX_RET 14, _, implicit %0
+ tBX_RET 14, %noreg, implicit %0
...
@@ -78,7 +78,7 @@ body: |
; CHECK: %r0 = KILL %r0, implicit-def %r1, implicit-def %r0_r1
; CHECK-NEXT: tBX_RET
undef %0.gsub_0 = COPY %r0, implicit-def %r1
- tBX_RET 14, _, implicit %0
+ tBX_RET 14, %noreg, implicit %0
...
diff --git a/test/CodeGen/ARM/vldm-liveness.mir b/test/CodeGen/ARM/vldm-liveness.mir
index a85a018a8b1..2056be4f008 100644
--- a/test/CodeGen/ARM/vldm-liveness.mir
+++ b/test/CodeGen/ARM/vldm-liveness.mir
@@ -26,15 +26,15 @@ body: |
bb.0 (%ir-block.0):
liveins: %r0
- %s1 = VLDRS %r0, 1, 14, _, implicit-def %q0 :: (load 4)
- %s3 = VLDRS %r0, 2, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
- ; CHECK: %s3 = VLDRS %r0, 2, 14, _, implicit killed undef %q0, implicit-def %q0 :: (load 4)
+ %s1 = VLDRS %r0, 1, 14, %noreg, implicit-def %q0 :: (load 4)
+ %s3 = VLDRS %r0, 2, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: %s3 = VLDRS %r0, 2, 14, %noreg, implicit killed undef %q0, implicit-def %q0 :: (load 4)
- %s0 = VLDRS %r0, 0, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
- ; CHECK: VLDMSIA %r0, 14, _, def %s0, def %s1, implicit-def _
+ %s0 = VLDRS %r0, 0, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: VLDMSIA %r0, 14, %noreg, def %s0, def %s1, implicit-def %noreg
- %s2 = VLDRS killed %r0, 4, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
- ; CHECK: %s2 = VLDRS killed %r0, 4, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
+ %s2 = VLDRS killed %r0, 4, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: %s2 = VLDRS killed %r0, 4, 14, %noreg, implicit killed %q0, implicit-def %q0 :: (load 4)
- tBX_RET 14, _, implicit %q0
+ tBX_RET 14, %noreg, implicit %q0
...
diff --git a/test/CodeGen/Hexagon/duplex.ll b/test/CodeGen/Hexagon/duplex.ll
index 80fe61ceccc..9f25726cf59 100644
--- a/test/CodeGen/Hexagon/duplex.ll
+++ b/test/CodeGen/Hexagon/duplex.ll
@@ -4,4 +4,4 @@
define i32 @foo() {
ret i32 0
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/Hexagon/early-if-debug.mir b/test/CodeGen/Hexagon/early-if-debug.mir
index 7c8fb0aee10..2f4a2db0a6e 100644
--- a/test/CodeGen/Hexagon/early-if-debug.mir
+++ b/test/CodeGen/Hexagon/early-if-debug.mir
@@ -6,11 +6,11 @@
# CHECK: %0:intregs = COPY %r0
# CHECK: %1:predregs = C2_cmpeqi %0, 0
# CHECK: %2:intregs = A2_tfrsi 123
-# CHECK: DBG_VALUE debug-use %0, debug-use _
-# CHECK: DBG_VALUE debug-use %0, debug-use _
-# CHECK: DBG_VALUE debug-use %0, debug-use _
-# CHECK: DBG_VALUE debug-use %0, debug-use _
-# CHECK: DBG_VALUE debug-use %0, debug-use _
+# CHECK: DBG_VALUE debug-use %0, debug-use %noreg
+# CHECK: DBG_VALUE debug-use %0, debug-use %noreg
+# CHECK: DBG_VALUE debug-use %0, debug-use %noreg
+# CHECK: DBG_VALUE debug-use %0, debug-use %noreg
+# CHECK: DBG_VALUE debug-use %0, debug-use %noreg
# CHECK: %3:intregs = A2_tfrsi 321
# CHECK: %5:intregs = C2_mux %1, %2, %3
@@ -40,11 +40,11 @@ body: |
J2_jump %bb.1, implicit-def dead %pc
bb.1:
- DBG_VALUE debug-use %0, debug-use _, !1, !1
- DBG_VALUE debug-use %0, debug-use _, !1, !1
- DBG_VALUE debug-use %0, debug-use _, !1, !1
- DBG_VALUE debug-use %0, debug-use _, !1, !1
- DBG_VALUE debug-use %0, debug-use _, !1, !1
+ DBG_VALUE debug-use %0, debug-use %noreg, !1, !1
+ DBG_VALUE debug-use %0, debug-use %noreg, !1, !1
+ DBG_VALUE debug-use %0, debug-use %noreg, !1, !1
+ DBG_VALUE debug-use %0, debug-use %noreg, !1, !1
+ DBG_VALUE debug-use %0, debug-use %noreg, !1, !1
%3 = A2_tfrsi 321
bb.2:
diff --git a/test/CodeGen/MIR/ARM/bundled-instructions.mir b/test/CodeGen/MIR/ARM/bundled-instructions.mir
index 56e21e36270..462d45c90b5 100644
--- a/test/CodeGen/MIR/ARM/bundled-instructions.mir
+++ b/test/CodeGen/MIR/ARM/bundled-instructions.mir
@@ -28,14 +28,14 @@ body: |
bb.0.entry:
liveins: %r0
; CHECK-LABEL: name: test1
- ; CHECK: %r1 = t2MOVi 0, 14, _, _
- ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr
+ ; CHECK: %r1 = t2MOVi 0, 14, %noreg, %noreg
+ ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, %noreg, implicit-def %cpsr
; CHECK-NEXT: BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr {
; CHECK-NEXT: t2IT 12, 8, implicit-def %itstate
- ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, _, implicit internal killed %itstate
+ ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, %noreg, implicit internal killed %itstate
; CHECK-NEXT: }
- ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, _
- ; CHECK-NEXT: tBX_RET 14, _, implicit killed %r0
+ ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, %noreg
+ ; CHECK-NEXT: tBX_RET 14, %noreg, implicit killed %r0
%r1 = t2MOVi 0, 14, _, _
t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr
BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr {
@@ -58,14 +58,14 @@ body: |
; '{' or '}'.
; CHECK-LABEL: name: test2
- ; CHECK: %r1 = t2MOVi 0, 14, _, _
- ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr
+ ; CHECK: %r1 = t2MOVi 0, 14, %noreg, %noreg
+ ; CHECK-NEXT: t2CMNri killed %r0, 78, 14, %noreg, implicit-def %cpsr
; CHECK-NEXT: BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr {
; CHECK-NEXT: t2IT 12, 8, implicit-def %itstate
- ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, _, implicit internal killed %itstate
+ ; CHECK-NEXT: %r1 = t2MOVi 1, 12, killed %cpsr, %noreg, implicit internal killed %itstate
; CHECK-NEXT: }
- ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, _
- ; CHECK-NEXT: tBX_RET 14, _, implicit killed %r0
+ ; CHECK-NEXT: %r0 = tMOVr killed %r1, 14, %noreg
+ ; CHECK-NEXT: tBX_RET 14, %noreg, implicit killed %r0
%r1 = t2MOVi 0, 14, _, _
t2CMNri killed %r0, 78, 14, _, implicit-def %cpsr
BUNDLE implicit-def dead %itstate, implicit-def %r1, implicit killed %cpsr { t2IT 12, 8, implicit-def %itstate
diff --git a/test/CodeGen/MIR/ARM/ifcvt_diamond_unanalyzable.mir b/test/CodeGen/MIR/ARM/ifcvt_diamond_unanalyzable.mir
index a6e5521fd2c..6b7ad20aa12 100644
--- a/test/CodeGen/MIR/ARM/ifcvt_diamond_unanalyzable.mir
+++ b/test/CodeGen/MIR/ARM/ifcvt_diamond_unanalyzable.mir
@@ -26,5 +26,5 @@ body: |
# CHECK: bb.0:
# CHECK: %sp = tADDspi %sp, 2, 1, %cpsr
# CHECK: %sp = tADDspi %sp, 1, 0, %cpsr, implicit %sp
-# CHECK: %sp = tADDspi %sp, 3, 14, _
-# CHECK: BX_RET 14, _
+# CHECK: %sp = tADDspi %sp, 3, 14, %noreg
+# CHECK: BX_RET 14, %noreg
diff --git a/test/CodeGen/MIR/ARM/ifcvt_forked_diamond_unanalyzable.mir b/test/CodeGen/MIR/ARM/ifcvt_forked_diamond_unanalyzable.mir
index 652c333c523..f5f09a8ec4a 100644
--- a/test/CodeGen/MIR/ARM/ifcvt_forked_diamond_unanalyzable.mir
+++ b/test/CodeGen/MIR/ARM/ifcvt_forked_diamond_unanalyzable.mir
@@ -40,9 +40,9 @@ body: |
# CHECK: Bcc %bb.2, 1, %cpsr
# CHECK: bb.1:
-# CHECK: %sp = tADDspi %sp, 4, 14, _
-# CHECK: BX_RET 14, _
+# CHECK: %sp = tADDspi %sp, 4, 14, %noreg
+# CHECK: BX_RET 14, %noreg
# CHECK: bb.2:
-# CHECK: %sp = tADDspi %sp, 3, 14, _
-# CHECK: BX_RET 14, _
+# CHECK: %sp = tADDspi %sp, 3, 14, %noreg
+# CHECK: BX_RET 14, %noreg
diff --git a/test/CodeGen/MIR/ARM/ifcvt_simple_unanalyzable.mir b/test/CodeGen/MIR/ARM/ifcvt_simple_unanalyzable.mir
index d0c6ffdb3fa..8d1c71ac98f 100644
--- a/test/CodeGen/MIR/ARM/ifcvt_simple_unanalyzable.mir
+++ b/test/CodeGen/MIR/ARM/ifcvt_simple_unanalyzable.mir
@@ -21,5 +21,5 @@ body: |
# CHECK: bb.0:
# CHECK: %sp = tADDspi %sp, 2, 0, %cpsr
# CHECK: BX_RET 0, %cpsr
-# CHECK: BX_RET 14, _
+# CHECK: BX_RET 14, %noreg
diff --git a/test/CodeGen/MIR/ARM/ifcvt_triangleWoCvtToNextEdge.mir b/test/CodeGen/MIR/ARM/ifcvt_triangleWoCvtToNextEdge.mir
index 981752654fc..92ecbc8dbbe 100644
--- a/test/CodeGen/MIR/ARM/ifcvt_triangleWoCvtToNextEdge.mir
+++ b/test/CodeGen/MIR/ARM/ifcvt_triangleWoCvtToNextEdge.mir
@@ -47,6 +47,6 @@ body: |
# CHECK: bb.2:
# CHECK-NOT: successors: %bb
# CHECK: tBL 1, %cpsr, @__stack_chk_fail
-# CHECK: %sp = tADDspi %sp, 2, 14, _
-# CHECK: %sp = tADDspi %sp, 2, 14, _
+# CHECK: %sp = tADDspi %sp, 2, 14, %noreg
+# CHECK: %sp = tADDspi %sp, 2, 14, %noreg
# CHECK: tTAILJMPdND @bar, 14, %cpsr
diff --git a/test/CodeGen/MIR/X86/block-address-operands.mir b/test/CodeGen/MIR/X86/block-address-operands.mir
index 2207f936096..85ce65275ce 100644
--- a/test/CodeGen/MIR/X86/block-address-operands.mir
+++ b/test/CodeGen/MIR/X86/block-address-operands.mir
@@ -57,7 +57,7 @@ name: test
body: |
bb.0.entry:
successors: %bb.1.block
- ; CHECK: %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block), _
+ ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test, %ir-block.block), %noreg
%rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block), _
MOV64mr %rip, 1, _, @addr, _, killed %rax
JMP64m %rip, 1, _, @addr, _
@@ -71,7 +71,7 @@ tracksRegLiveness: true
body: |
bb.0.entry:
successors: %bb.1
- ; CHECK: %rax = LEA64r %rip, 1, _, blockaddress(@test2, %ir-block."quoted block"), _
+ ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test2, %ir-block."quoted block"), %noreg
%rax = LEA64r %rip, 1, _, blockaddress(@test2, %ir-block."quoted block"), _
MOV64mr %rip, 1, _, @addr, _, killed %rax
JMP64m %rip, 1, _, @addr, _
@@ -86,7 +86,7 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK-LABEL: name: slot_in_other_function
- ; CHECK: %rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _
+ ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test3, %ir-block.0), %noreg
%rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _
MOV64mr killed %rdi, 1, _, 0, _, killed %rax
RETQ
@@ -98,7 +98,7 @@ body: |
bb.0.entry:
successors: %bb.1
; CHECK-LABEL: name: test3
- ; CHECK: %rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _
+ ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test3, %ir-block.0), %noreg
%rax = LEA64r %rip, 1, _, blockaddress(@test3, %ir-block.0), _
MOV64mr %rip, 1, _, @addr, _, killed %rax
JMP64m %rip, 1, _, @addr, _
@@ -111,7 +111,7 @@ name: test4
body: |
bb.0.entry:
successors: %bb.1.block
- ; CHECK: %rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block) + 2, _
+ ; CHECK: %rax = LEA64r %rip, 1, %noreg, blockaddress(@test, %ir-block.block) + 2, %noreg
%rax = LEA64r %rip, 1, _, blockaddress(@test, %ir-block.block) + 2, _
MOV64mr %rip, 1, _, @addr, _, killed %rax
JMP64m %rip, 1, _, @addr, _
diff --git a/test/CodeGen/MIR/X86/constant-pool.mir b/test/CodeGen/MIR/X86/constant-pool.mir
index 60e12d3ddcd..431af44b0c5 100644
--- a/test/CodeGen/MIR/X86/constant-pool.mir
+++ b/test/CodeGen/MIR/X86/constant-pool.mir
@@ -61,8 +61,8 @@ constants:
alignment: 4
body: |
bb.0.entry:
- ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _
- ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _
+ ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg
+ ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.1, %noreg
%xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _
%xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _
%xmm1 = CVTSS2SDrr killed %xmm1
@@ -117,8 +117,8 @@ constants:
alignment: 1
body: |
bb.0.entry:
- ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _
- ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _
+ ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg
+ ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.1, %noreg
%xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _
%xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.1, _
%xmm1 = CVTSS2SDrr killed %xmm1
@@ -135,8 +135,8 @@ constants:
value: 'float 6.250000e+00'
body: |
bb.0.entry:
- ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.1 - 12, _
- ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.0 + 8, _
+ ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.1 - 12, %noreg
+ ; CHECK-NEXT: %xmm1 = ADDSSrm killed %xmm1, %rip, 1, %noreg, %const.0 + 8, %noreg
%xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.1 - 12, _
%xmm1 = ADDSSrm killed %xmm1, %rip, 1, _, %const.0 + 8, _
%xmm1 = CVTSS2SDrr killed %xmm1
diff --git a/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir b/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
index 8ae76753984..92ceb1e78e0 100644
--- a/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
+++ b/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
@@ -31,7 +31,7 @@ body: |
frame-setup PUSH32r undef %eax, implicit-def %esp, implicit %esp
CFI_INSTRUCTION def_cfa_offset 8
; CHECK: name: test
- ; CHECK: %eax = MOV32rm %esp, 1, _, 8, _ :: (load 4 from %fixed-stack.0, align 16)
+ ; CHECK: %eax = MOV32rm %esp, 1, %noreg, 8, %noreg :: (load 4 from %fixed-stack.0, align 16)
%eax = MOV32rm %esp, 1, _, 8, _ :: (load 4 from %fixed-stack.0, align 16)
MOV32mr %esp, 1, _, 0, _, %eax :: (store 4 into %ir.b)
%edx = POP32r implicit-def %esp, implicit %esp
diff --git a/test/CodeGen/MIR/X86/global-value-operands.mir b/test/CodeGen/MIR/X86/global-value-operands.mir
index 9b9554da7bd..8c8dee9214f 100644
--- a/test/CodeGen/MIR/X86/global-value-operands.mir
+++ b/test/CodeGen/MIR/X86/global-value-operands.mir
@@ -64,7 +64,7 @@
name: inc
body: |
bb.0.entry:
- ; CHECK: %rax = MOV64rm %rip, 1, _, @G, _
+ ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @G, %noreg
%rax = MOV64rm %rip, 1, _, @G, _
%eax = MOV32rm %rax, 1, _, 0, _
%eax = INC32r %eax, implicit-def %eflags
@@ -75,7 +75,7 @@ body: |
name: inc2
body: |
bb.0.entry:
- ; CHECK: %rax = MOV64rm %rip, 1, _, @0, _
+ ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @0, %noreg
%rax = MOV64rm %rip, 1, _, @0, _
%eax = MOV32rm %rax, 1, _, 0, _
%eax = INC32r %eax, implicit-def %eflags
@@ -132,7 +132,7 @@ body: |
name: tf
body: |
bb.0.entry:
- ; CHECK: %rax = MOV64rm %rip, 1, _, target-flags(x86-gotpcrel) @G, _
+ ; CHECK: %rax = MOV64rm %rip, 1, %noreg, target-flags(x86-gotpcrel) @G, %noreg
%rax = MOV64rm %rip, 1, _, target-flags(x86-gotpcrel) @G, _
%eax = MOV32rm %rax, 1, _, 0, _
%eax = INC32r %eax, implicit-def %eflags
diff --git a/test/CodeGen/MIR/X86/instructions-debug-location.mir b/test/CodeGen/MIR/X86/instructions-debug-location.mir
index 28809d3ee90..c49dfec53bb 100644
--- a/test/CodeGen/MIR/X86/instructions-debug-location.mir
+++ b/test/CodeGen/MIR/X86/instructions-debug-location.mir
@@ -59,7 +59,7 @@ stack:
body: |
bb.0.entry:
liveins: %edi
- ; CHECK: DBG_VALUE debug-use _, 0, !11, !DIExpression(), debug-location !12
+ ; CHECK: DBG_VALUE debug-use %noreg, 0, !11, !DIExpression(), debug-location !12
; CHECK: %eax = COPY %0, debug-location !13
; CHECK: RETQ %eax, debug-location !13
%0 = COPY %edi
@@ -82,9 +82,9 @@ body: |
liveins: %edi
%0 = COPY %edi
- ; CHECK: DBG_VALUE _, i32 0, !DIExpression(), !12
- ; CHECK-NEXT: DBG_VALUE _, i64 -22, !DIExpression(), !12
- ; CHECK-NEXT: DBG_VALUE _, i128 123492148938512984928424384934328985928, !DIExpression(), !12
+ ; CHECK: DBG_VALUE %noreg, i32 0, !DIExpression(), !12
+ ; CHECK-NEXT: DBG_VALUE %noreg, i64 -22, !DIExpression(), !12
+ ; CHECK-NEXT: DBG_VALUE %noreg, i128 123492148938512984928424384934328985928, !DIExpression(), !12
DBG_VALUE _, i32 0, !DIExpression(), !13
DBG_VALUE _, i64 -22, !DIExpression(), !13
DBG_VALUE _, i128 123492148938512984928424384934328985928, !DIExpression(), !13
diff --git a/test/CodeGen/MIR/X86/jump-table-info.mir b/test/CodeGen/MIR/X86/jump-table-info.mir
index e44f4b237df..52d562c8212 100644
--- a/test/CodeGen/MIR/X86/jump-table-info.mir
+++ b/test/CodeGen/MIR/X86/jump-table-info.mir
@@ -78,7 +78,7 @@ body: |
bb.1.entry:
successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
- ; CHECK: %rcx = LEA64r %rip, 1, _, %jump-table.0, _
+ ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
%rcx = LEA64r %rip, 1, _, %jump-table.0, _
%rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
%rax = ADD64rr %rax, %rcx, implicit-def %eflags
@@ -122,7 +122,7 @@ body: |
bb.1.entry:
successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4
; Verify that the printer will use an id of 0 for this jump table:
- ; CHECK: %rcx = LEA64r %rip, 1, _, %jump-table.0, _
+ ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg
%rcx = LEA64r %rip, 1, _, %jump-table.1, _
%rax = MOVSX64rm32 %rcx, 4, %rax, 0, _
%rax = ADD64rr %rax, %rcx, implicit-def %eflags
diff --git a/test/CodeGen/MIR/X86/memory-operands.mir b/test/CodeGen/MIR/X86/memory-operands.mir
index 5ac932e9034..5db721da72e 100644
--- a/test/CodeGen/MIR/X86/memory-operands.mir
+++ b/test/CodeGen/MIR/X86/memory-operands.mir
@@ -198,8 +198,8 @@ liveins:
body: |
bb.0.entry:
liveins: %rdi
- ; CHECK: %eax = MOV32rm %rdi, 1, _, 0, _ :: (load 4 from %ir.a)
- ; CHECK-NEXT: MOV32mi killed %rdi, 1, _, 0, _, 42 :: (store 4 into %ir.a)
+ ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.a)
+ ; CHECK-NEXT: MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 42 :: (store 4 into %ir.a)
%eax = MOV32rm %rdi, 1, _, 0, _ :: (load 4 from %ir.a)
MOV32mi killed %rdi, 1, _, 0, _, 42 :: (store 4 into %ir.a)
RETQ %eax
@@ -212,7 +212,7 @@ liveins:
body: |
bb.0.entry2:
liveins: %rdi
- ; CHECK: INC32m killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
+ ; CHECK: INC32m killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
INC32m killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (store 4 into %ir."a value"), (load 4 from %ir."a value")
RETQ
...
@@ -230,8 +230,8 @@ body: |
liveins: %rdi
; Verify that the unnamed local values can be serialized.
; CHECK-LABEL: name: test3
- ; CHECK: %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.0)
- ; CHECK: MOV32mr %rsp, 1, _, -4, _, killed %eax :: (store 4 into %ir.1)
+ ; CHECK: %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.0)
+ ; CHECK: MOV32mr %rsp, 1, %noreg, -4, %noreg, killed %eax :: (store 4 into %ir.1)
%eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.0)
%eax = INC32r killed %eax, implicit-def dead %eflags
MOV32mr %rsp, 1, _, -4, _, killed %eax :: (store 4 into %ir.1)
@@ -246,8 +246,8 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK: name: volatile_inc
- ; CHECK: %eax = MOV32rm %rdi, 1, _, 0, _ :: (volatile load 4 from %ir.x)
- ; CHECK: MOV32mr killed %rdi, 1, _, 0, _, %eax :: (volatile store 4 into %ir.x)
+ ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.x)
+ ; CHECK: MOV32mr killed %rdi, 1, %noreg, 0, %noreg, %eax :: (volatile store 4 into %ir.x)
%eax = MOV32rm %rdi, 1, _, 0, _ :: (volatile load 4 from %ir.x)
%eax = INC32r killed %eax, implicit-def dead %eflags
MOV32mr killed %rdi, 1, _, 0, _, %eax :: (volatile store 4 into %ir.x)
@@ -263,7 +263,7 @@ body: |
bb.0.entry:
liveins: %esi, %rdi
; CHECK: name: non_temporal_store
- ; CHECK: MOVNTImr killed %rdi, 1, _, 0, _, killed %esi :: (non-temporal store 4 into %ir.a)
+ ; CHECK: MOVNTImr killed %rdi, 1, %noreg, 0, %noreg, killed %esi :: (non-temporal store 4 into %ir.a)
MOVNTImr killed %rdi, 1, _, 0, _, killed %esi :: (non-temporal store 4 into %ir.a)
RETQ
...
@@ -276,7 +276,7 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK: name: invariant_load
- ; CHECK: %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (invariant load 4 from %ir.x)
+ ; CHECK: %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (invariant load 4 from %ir.x)
%eax = MOV32rm killed %rdi, 1, _, 0, _ :: (invariant load 4 from %ir.x)
RETQ %eax
...
@@ -289,10 +289,10 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK: name: memory_offset
- ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec)
- ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16)
- ; CHECK: MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec)
- ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16)
+ ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, %noreg, 0, %noreg :: (load 16 from %ir.vec)
+ ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, %noreg, 16, %noreg :: (load 16 from %ir.vec + 16)
+ ; CHECK: MOVAPSmr %rdi, 1, %noreg, 0, %noreg, killed %xmm0 :: (store 16 into %ir.vec)
+ ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, %noreg, 16, %noreg, killed %xmm1 :: (store 16 into %ir.vec + 16)
%xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec)
%xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16)
%xmm2 = FsFLD0SS
@@ -310,10 +310,10 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK: name: memory_alignment
- ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
- ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
- ; CHECK: MOVAPSmr %rdi, 1, _, 0, _, killed %xmm0 :: (store 16 into %ir.vec, align 32)
- ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, _, 16, _, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32)
+ ; CHECK: %xmm0 = MOVAPSrm %rdi, 1, %noreg, 0, %noreg :: (load 16 from %ir.vec, align 32)
+ ; CHECK-NEXT: %xmm1 = MOVAPSrm %rdi, 1, %noreg, 16, %noreg :: (load 16 from %ir.vec + 16, align 32)
+ ; CHECK: MOVAPSmr %rdi, 1, %noreg, 0, %noreg, killed %xmm0 :: (store 16 into %ir.vec, align 32)
+ ; CHECK-NEXT: MOVAPSmr killed %rdi, 1, %noreg, 16, %noreg, killed %xmm1 :: (store 16 into %ir.vec + 16, align 32)
%xmm0 = MOVAPSrm %rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
%xmm1 = MOVAPSrm %rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
%xmm2 = FsFLD0SS
@@ -334,8 +334,8 @@ body: |
bb.0.entry:
liveins: %xmm0
; CHECK: name: constant_pool_psv
- ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool)
- ; CHECK-NEXT: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool + 8)
+ ; CHECK: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg :: (load 8 from constant-pool)
+ ; CHECK-NEXT: %xmm0 = ADDSDrm killed %xmm0, %rip, 1, %noreg, %const.0, %noreg :: (load 8 from constant-pool + 8)
%xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool)
%xmm0 = ADDSDrm killed %xmm0, %rip, 1, _, %const.0, _ :: (load 8 from constant-pool + 8)
RETQ %xmm0
@@ -355,9 +355,9 @@ body: |
bb.0.entry:
%rsp = frame-setup SUB64ri8 %rsp, 24, implicit-def dead %eflags
CFI_INSTRUCTION def_cfa_offset 32
- LD_F80m %rsp, 1, _, 32, _, implicit-def dead %fpsw
+ LD_F80m %rsp, 1, %noreg, 32, %noreg, implicit-def dead %fpsw
; CHECK: name: stack_psv
- ; CHECK: ST_FP80m %rsp, 1, _, 0, _, implicit-def dead %fpsw :: (store 10 into stack, align 16)
+ ; CHECK: ST_FP80m %rsp, 1, %noreg, 0, %noreg, implicit-def dead %fpsw :: (store 10 into stack, align 16)
ST_FP80m %rsp, 1, _, 0, _, implicit-def dead %fpsw :: (store 10 into stack, align 16)
CALL64pcrel32 $cosl, csr_64, implicit %rsp, implicit-def %rsp, implicit-def %fp0
%rsp = ADD64ri8 %rsp, 24, implicit-def dead %eflags
@@ -369,7 +369,7 @@ tracksRegLiveness: true
body: |
bb.0.entry:
; CHECK: name: got_psv
- ; CHECK: %rax = MOV64rm %rip, 1, _, @G, _ :: (load 8 from got)
+ ; CHECK: %rax = MOV64rm %rip, 1, %noreg, @G, %noreg :: (load 8 from got)
%rax = MOV64rm %rip, 1, _, @G, _ :: (load 8 from got)
%eax = MOV32rm killed %rax, 1, _, 0, _
%eax = INC32r killed %eax, implicit-def dead %eflags
@@ -382,8 +382,8 @@ body: |
bb.0.entry:
%rax = MOV64rm %rip, 1, _, @G, _
; CHECK-LABEL: name: global_value
- ; CHECK: %eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @G)
- ; CHECK: %ecx = MOV32rm killed %rcx, 1, _, 0, _, implicit-def %rcx :: (load 4 from @0)
+ ; CHECK: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from @G)
+ ; CHECK: %ecx = MOV32rm killed %rcx, 1, %noreg, 0, %noreg, implicit-def %rcx :: (load 4 from @0)
%eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @G)
%rcx = MOV64rm %rip, 1, _, @0, _
%ecx = MOV32rm killed %rcx, 1, _, 0, _, implicit-def %rcx :: (load 4 from @0)
@@ -415,7 +415,7 @@ body: |
%rcx = LEA64r %rip, 1, _, %jump-table.0, _
; CHECK: name: jumptable_psv
- ; CHECK: %rax = MOVSX64rm32 %rcx, 4, killed %rax, 0, _ :: (load 4 from jump-table, align 8)
+ ; CHECK: %rax = MOVSX64rm32 %rcx, 4, killed %rax, 0, %noreg :: (load 4 from jump-table, align 8)
%rax = MOVSX64rm32 %rcx, 4, killed %rax, 0, _ :: (load 4 from jump-table, align 8)
%rax = ADD64rr killed %rax, killed %rcx, implicit-def dead %eflags
JMP64r killed %rax
@@ -447,8 +447,8 @@ body: |
bb.0.entry:
%rax = MOV64rm %rip, 1, _, @a, _ :: (load 8 from got)
; CHECK-LABEL: name: tbaa_metadata
- ; CHECK: %eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @a, !tbaa !2)
- ; CHECK-NEXT: %eax = MOV32rm killed %rax, 1, _, 0, _ :: (load 4 from %ir.total_len2, !tbaa !6)
+ ; CHECK: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from @a, !tbaa !2)
+ ; CHECK-NEXT: %eax = MOV32rm killed %rax, 1, %noreg, 0, %noreg :: (load 4 from %ir.total_len2, !tbaa !6)
%eax = MOV32rm killed %rax, 1, _, 0, _, implicit-def %rax :: (load 4 from @a, !tbaa !2)
%eax = MOV32rm killed %rax, 1, _, 0, _ :: (load 4 from %ir.total_len2, !tbaa !6)
RETQ %eax
@@ -463,9 +463,9 @@ body: |
bb.0.entry:
liveins: %rdi, %rsi
; CHECK-LABEL: name: aa_scope
- ; CHECK: %xmm0 = MOVSSrm %rsi, 1, _, 0, _ :: (load 4 from %ir.c, !alias.scope !9)
+ ; CHECK: %xmm0 = MOVSSrm %rsi, 1, %noreg, 0, %noreg :: (load 4 from %ir.c, !alias.scope !9)
%xmm0 = MOVSSrm %rsi, 1, _, 0, _ :: (load 4 from %ir.c, !alias.scope !9)
- ; CHECK-NEXT: MOVSSmr %rdi, 1, _, 20, _, killed %xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
+ ; CHECK-NEXT: MOVSSmr %rdi, 1, %noreg, 20, %noreg, killed %xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
MOVSSmr %rdi, 1, _, 20, _, killed %xmm0 :: (store 4 into %ir.arrayidx.i, !noalias !9)
%xmm0 = MOVSSrm killed %rsi, 1, _, 0, _ :: (load 4 from %ir.c)
MOVSSmr killed %rdi, 1, _, 28, _, killed %xmm0 :: (store 4 into %ir.arrayidx)
@@ -480,7 +480,7 @@ body: |
bb.0.entry:
liveins: %rdi
; CHECK-LABEL: name: range_metadata
- ; CHECK: %al = MOV8rm killed %rdi, 1, _, 0, _ :: (load 1 from %ir.x, !range !11)
+ ; CHECK: %al = MOV8rm killed %rdi, 1, %noreg, 0, %noreg :: (load 1 from %ir.x, !range !11)
%al = MOV8rm killed %rdi, 1, _, 0, _ :: (load 1 from %ir.x, !range !11)
RETQ %al
...
@@ -495,7 +495,7 @@ body: |
%rax = MOV64rm %rip, 1, _, @values, _ :: (load 8 from got)
; CHECK-LABEL: gep_value
- ; CHECK: MOV32mr killed %rax, 1, _, 0, _, %edi, implicit killed %rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
+ ; CHECK: MOV32mr killed %rax, 1, %noreg, 0, %noreg, %edi, implicit killed %rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
MOV32mr killed %rax, 1, _, 0, _, %edi, implicit killed %rdi :: (store 4 into `i32* getelementptr inbounds ([50 x %st], [50 x %st]* @values, i64 0, i64 0, i32 0)`, align 16)
RETQ
...
@@ -505,14 +505,14 @@ tracksRegLiveness: true
body: |
bb.0.entry:
; CHECK-LABEL: name: undef_value
- ; CHECK: %rax = MOV64rm undef %rax, 1, _, 0, _ :: (load 8 from `i8** undef`)
+ ; CHECK: %rax = MOV64rm undef %rax, 1, %noreg, 0, %noreg :: (load 8 from `i8** undef`)
%rax = MOV64rm undef %rax, 1, _, 0, _ :: (load 8 from `i8** undef`)
RETQ %rax
...
---
# Test memory operand without associated value.
# CHECK-LABEL: name: dummy0
-# CHECK: %rax = MOV64rm undef %rax, 1, _, 0, _ :: (load 8)
+# CHECK: %rax = MOV64rm undef %rax, 1, %noreg, 0, %noreg :: (load 8)
name: dummy0
tracksRegLiveness: true
body: |
@@ -523,7 +523,7 @@ body: |
---
# Test parsing of stack references in machine memory operands.
# CHECK-LABEL: name: dummy1
-# CHECK: %rax = MOV64rm %rsp, 1, _, 0, _ :: (load 8 from %stack.0)
+# CHECK: %rax = MOV64rm %rsp, 1, %noreg, 0, %noreg :: (load 8 from %stack.0)
name: dummy1
tracksRegLiveness: true
stack:
diff --git a/test/CodeGen/MIR/X86/metadata-operands.mir b/test/CodeGen/MIR/X86/metadata-operands.mir
index 501d0c58a63..94091cdb827 100644
--- a/test/CodeGen/MIR/X86/metadata-operands.mir
+++ b/test/CodeGen/MIR/X86/metadata-operands.mir
@@ -51,7 +51,7 @@ body: |
bb.0.entry:
liveins: %edi
; CHECK: %0:gr32 = COPY %edi
- ; CHECK-NEXT: DBG_VALUE _, 0, !11, !DIExpression()
+ ; CHECK-NEXT: DBG_VALUE %noreg, 0, !11, !DIExpression()
%0 = COPY %edi
DBG_VALUE _, 0, !12, !DIExpression()
MOV32mr %stack.0.x.addr, 1, _, 0, _, %0
diff --git a/test/CodeGen/MIR/X86/null-register-operands.mir b/test/CodeGen/MIR/X86/null-register-operands.mir
index 9cba00bc9e5..bb7a2e5688c 100644
--- a/test/CodeGen/MIR/X86/null-register-operands.mir
+++ b/test/CodeGen/MIR/X86/null-register-operands.mir
@@ -15,7 +15,7 @@
name: deref
body: |
bb.0.entry:
- ; CHECK: %eax = MOV32rm %rdi, 1, _, 0, _
+ ; CHECK: %eax = MOV32rm %rdi, 1, %noreg, 0, %noreg
; CHECK-NEXT: RETQ %eax
%eax = MOV32rm %rdi, 1, _, 0, %noreg
RETQ %eax
diff --git a/test/CodeGen/MIR/X86/roundtrip.mir b/test/CodeGen/MIR/X86/roundtrip.mir
index 9679b52f2ba..6d5c3516f33 100644
--- a/test/CodeGen/MIR/X86/roundtrip.mir
+++ b/test/CodeGen/MIR/X86/roundtrip.mir
@@ -8,7 +8,7 @@
# CHECK: bb.0:
# CHECK: %0:gr32 = MOV32r0 implicit-def %eflags
# CHECK: dead %1:gr32 = COPY %0
-# CHECK: MOV32mr undef %rcx, 1, _, 0, _, killed %0 :: (volatile store 4)
+# CHECK: MOV32mr undef %rcx, 1, %noreg, 0, %noreg, killed %0 :: (volatile store 4)
# CHECK: RETQ undef %eax
name: func0
body: |
diff --git a/test/CodeGen/MIR/X86/stack-object-operands.mir b/test/CodeGen/MIR/X86/stack-object-operands.mir
index 262b6dcb399..806caf6a290 100644
--- a/test/CodeGen/MIR/X86/stack-object-operands.mir
+++ b/test/CodeGen/MIR/X86/stack-object-operands.mir
@@ -32,10 +32,10 @@ stack:
body: |
bb.0.entry:
; CHECK-LABEL: name: test
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _
- ; CHECK: MOV32mr %stack.0.b, 1, _, 0, _, [[MOV32rm]]
- ; CHECK: MOV32mi %stack.1, 1, _, 0, _, 2
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.0.b, 1, _, 0, _
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg
+ ; CHECK: MOV32mr %stack.0.b, 1, %noreg, 0, %noreg, [[MOV32rm]]
+ ; CHECK: MOV32mi %stack.1, 1, %noreg, 0, %noreg, 2
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.0.b, 1, %noreg, 0, %noreg
; CHECK: %eax = COPY [[MOV32rm1]]
; CHECK: RETL %eax
%0 = MOV32rm %fixed-stack.0, 1, _, 0, _
diff --git a/test/CodeGen/Mips/const-mult.ll b/test/CodeGen/Mips/const-mult.ll
index 47efdbf163c..459aad61828 100644
--- a/test/CodeGen/Mips/const-mult.ll
+++ b/test/CodeGen/Mips/const-mult.ll
@@ -90,4 +90,4 @@ define i128 @mul170141183460469231731687303715884105723_128(i128 signext %a) {
entry:
%mul = mul nsw i128 %a, 170141183460469231731687303715884105723
ret i128 %mul
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/Mips/mips64signextendsesf.ll b/test/CodeGen/Mips/mips64signextendsesf.ll
index d0ce1b86bf5..2ee1e09f500 100644
--- a/test/CodeGen/Mips/mips64signextendsesf.ll
+++ b/test/CodeGen/Mips/mips64signextendsesf.ll
@@ -211,4 +211,4 @@ declare float @fminf(float, float) #1
attributes #0 = { nounwind "use-soft-float"="true" }
-attributes #1 = { nounwind readnone "use-soft-float"="true" }
\ No newline at end of file
+attributes #1 = { nounwind readnone "use-soft-float"="true" }
diff --git a/test/CodeGen/PowerPC/cxx_tlscc64.ll b/test/CodeGen/PowerPC/cxx_tlscc64.ll
index 0f45652cd29..cfa243f200a 100644
--- a/test/CodeGen/PowerPC/cxx_tlscc64.ll
+++ b/test/CodeGen/PowerPC/cxx_tlscc64.ll
@@ -40,4 +40,4 @@ define cxx_fast_tlscc i32* @_ZTW4sum2() #0 {
ret i32* @sum1
}
-attributes #0 = { nounwind "no-frame-pointer-elim"="true" }
\ No newline at end of file
+attributes #0 = { nounwind "no-frame-pointer-elim"="true" }
diff --git a/test/CodeGen/PowerPC/debuginfo-split-int.ll b/test/CodeGen/PowerPC/debuginfo-split-int.ll
index 4bcf43c9dae..e6f0ee20208 100644
--- a/test/CodeGen/PowerPC/debuginfo-split-int.ll
+++ b/test/CodeGen/PowerPC/debuginfo-split-int.ll
@@ -27,9 +27,9 @@ target triple = "ppc32"
;
; High 32 bits in R3, low 32 bits in R4
; CHECK: %0:gprc = COPY %r3
-; CHECK: DBG_VALUE debug-use %0, debug-use _, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32)
+; CHECK: DBG_VALUE debug-use %0, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32)
; CHECK: %1:gprc = COPY %r4
-; CHECK: DBG_VALUE debug-use %1, debug-use _, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32)
+; CHECK: DBG_VALUE debug-use %1, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32)
define void @bar() local_unnamed_addr #0 !dbg !6 {
%1 = alloca i64, align 8
%2 = tail call i64 @foo()
diff --git a/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll b/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
index c8435a97b79..740bc787ec9 100644
--- a/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
+++ b/test/CodeGen/PowerPC/ppc32-align-long-double-sf.ll
@@ -18,4 +18,4 @@ declare i32 @printf(i8* nocapture readonly, ...) #0
attributes #0 = { "use-soft-float"="true" }
- 
\ No newline at end of file
+
diff --git a/test/CodeGen/SPARC/LeonItinerariesUT.ll b/test/CodeGen/SPARC/LeonItinerariesUT.ll
index 87e0c4621c0..5a6be134686 100644
--- a/test/CodeGen/SPARC/LeonItinerariesUT.ll
+++ b/test/CodeGen/SPARC/LeonItinerariesUT.ll
@@ -47,4 +47,4 @@ entry:
%6 = fmul float %5, %3
%7 = fdiv float %6, %4
ret float %7
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir b/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
index 2f532f0a5ef..e4365f605cb 100644
--- a/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
+++ b/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
@@ -149,7 +149,7 @@ body: |
%11 = VGBM 0
%43 = LHIMux 0
%44 = LARL %const.0
- %45 = VL64 %44, 0, _ :: (load 8 from constant-pool)
+ %45 = VL64 %44, 0, %noreg :: (load 8 from constant-pool)
bb.1:
ADJCALLSTACKDOWN 0, 0
@@ -160,19 +160,19 @@ body: |
KILL killed %f0d
bb.2:
- %17 = VLGVH %11, _, 0
+ %17 = VLGVH %11, %noreg, 0
%19 = LHR %17.subreg_l32
undef %20.subreg_l64 = LGHI 0
%20 = DSGFR %20, %19
- %22 = VLGVH %11, _, 3
+ %22 = VLGVH %11, %noreg, 3
%24 = LHR %22.subreg_l32
undef %25.subreg_l64 = LGHI 0
%25 = DSGFR %25, %24
- %31 = VLGVH %11, _, 1
+ %31 = VLGVH %11, %noreg, 1
%33 = LHR %31.subreg_l32
undef %34.subreg_l64 = LGHI 0
%34 = DSGFR %34, %33
- %37 = VLGVH %11, _, 2
+ %37 = VLGVH %11, %noreg, 2
%39 = LHR %37.subreg_l32
undef %40.subreg_l64 = LGHI 0
%40 = DSGFR %40, %39
@@ -191,10 +191,10 @@ body: |
bb.4:
%36 = VLVGP %25.subreg_l64, %25.subreg_l64
- %36 = VLVGH %36, %20.subreg_l32, _, 0
- %36 = VLVGH %36, %34.subreg_l32, _, 1
- dead %36 = VLVGH %36, %40.subreg_l32, _, 2
- %4 = LG undef %42, 0, _ :: (load 8 from `i64* undef`)
+ %36 = VLVGH %36, %20.subreg_l32, %noreg, 0
+ %36 = VLVGH %36, %34.subreg_l32, %noreg, 1
+ dead %36 = VLVGH %36, %40.subreg_l32, %noreg, 2
+ %4 = LG undef %42, 0, %noreg :: (load 8 from `i64* undef`)
undef %57.subreg_h64 = LLILL 0
undef %66.subreg_h64 = LLILL 0
undef %79.subreg_h64 = LLILL 0
@@ -204,27 +204,27 @@ body: |
bb.5:
bb.6:
- %51 = VLGVH undef %7, _, 0
+ %51 = VLGVH undef %7, %noreg, 0
%53 = LLHRMux %51.subreg_l32
- %54 = VLGVH undef %1, _, 0
+ %54 = VLGVH undef %1, %noreg, 0
%57.subreg_l32 = LLHRMux %54.subreg_l32
%58 = COPY %57
%58 = DLR %58, %53
- %60 = VLGVH undef %7, _, 3
+ %60 = VLGVH undef %7, %noreg, 3
%62 = LLHRMux %60.subreg_l32
- %63 = VLGVH undef %1, _, 3
+ %63 = VLGVH undef %1, %noreg, 3
%66.subreg_l32 = LLHRMux %63.subreg_l32
%67 = COPY %66
%67 = DLR %67, %62
- %73 = VLGVH undef %7, _, 1
+ %73 = VLGVH undef %7, %noreg, 1
%75 = LLHRMux %73.subreg_l32
- %76 = VLGVH undef %1, _, 1
+ %76 = VLGVH undef %1, %noreg, 1
%79.subreg_l32 = LLHRMux %76.subreg_l32
%80 = COPY %79
%80 = DLR %80, %75
- %83 = VLGVH undef %7, _, 2
+ %83 = VLGVH undef %7, %noreg, 2
%85 = LLHRMux %83.subreg_l32
- %86 = VLGVH undef %1, _, 2
+ %86 = VLGVH undef %1, %noreg, 2
%89.subreg_l32 = LLHRMux %86.subreg_l32
%90 = COPY %89
%90 = DLR %90, %85
@@ -248,12 +248,12 @@ body: |
bb.9:
%82 = VLVGP %67.subreg_h64, %67.subreg_h64
- %82 = VLVGH %82, %58.subreg_hl32, _, 0
- %82 = VLVGH %82, %80.subreg_hl32, _, 1
- dead %82 = VLVGH %82, %90.subreg_hl32, _, 2
+ %82 = VLVGH %82, %58.subreg_hl32, %noreg, 0
+ %82 = VLVGH %82, %80.subreg_hl32, %noreg, 1
+ dead %82 = VLVGH %82, %90.subreg_hl32, %noreg, 2
%96 = AFIMux %96, 1879048192, implicit-def dead %cc
- %96 = SRL %96, _, 31
- dead %11 = VLVGF %11, %96, _, 1
+ %96 = SRL %96, %noreg, 31
+ dead %11 = VLVGF %11, %96, %noreg, 1
%100 = LHIMux 0
bb.10:
diff --git a/test/CodeGen/SystemZ/clear-liverange-spillreg.mir b/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
index 0ab969b56cc..06729f0b91a 100644
--- a/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
+++ b/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
@@ -223,14 +223,14 @@ body: |
bb.11:
%4 = COPY %60
- %6 = SLLG %120, _, 1
+ %6 = SLLG %120, %noreg, 1
%7 = LA %6, 64, %41
%6 = AGR %6, %42, implicit-def dead %cc
- %45 = SRLK %120.subreg_l32, _, 31
+ %45 = SRLK %120.subreg_l32, %noreg, 31
%45 = AR %45, %120.subreg_l32, implicit-def dead %cc
%45 = NIFMux %45, 536870910, implicit-def dead %cc
%47 = SRK %120.subreg_l32, %45, implicit-def dead %cc
- %47 = SLL %47, _, 3
+ %47 = SLL %47, %noreg, 3
%81 = LGFR %47
bb.12:
@@ -284,43 +284,43 @@ body: |
MVHI %0, 332, 2 :: (store 4)
%60 = COPY %126
%60 = AR %60, %4, implicit-def dead %cc
- %18 = LHMux %6, 0, _ :: (load 2)
+ %18 = LHMux %6, 0, %noreg :: (load 2)
CHIMux %38, 0, implicit-def %cc
BRC 14, 6, %bb.19, implicit killed %cc
J %bb.18
bb.18:
- %62 = SLLG %81, _, 1
+ %62 = SLLG %81, %noreg, 1
%64 = LA %62, 0, %63
- %65 = LG undef %66, 0, _ :: (load 8)
- %67 = LGF undef %68, 0, _ :: (load 4)
+ %65 = LG undef %66, 0, %noreg :: (load 8)
+ %67 = LGF undef %68, 0, %noreg :: (load 4)
MVC undef %69, 0, 2, %64, 0 :: (store 2), (load 2)
%70 = COPY %81
%70 = OILL64 %70, 3, implicit-def dead %cc
- %71 = LA %70, 2, _
- %72 = SLLG %71, _, 1
+ %71 = LA %70, 2, %noreg
+ %72 = SLLG %71, %noreg, 1
%73 = LHMux %72, 0, %63 :: (load 2)
%74 = LA %70, 2, %67
- %75 = SLLG %74, _, 1
- %76 = LG %65, 0, _ :: (load 8)
+ %75 = SLLG %74, %noreg, 1
+ %76 = LG %65, 0, %noreg :: (load 8)
STHMux %73, %76, 0, %75 :: (store 2)
- %77 = LG undef %78, 0, _ :: (load 8)
+ %77 = LG undef %78, 0, %noreg :: (load 8)
%79 = LHRL @rec_mbY8x8 :: (load 2)
- STHMux %79, %77, 0, _ :: (store 2)
+ STHMux %79, %77, 0, %noreg :: (store 2)
%80 = LHMux %72, 0, %63 :: (load 2)
STHMux %80, %77, 0, %75 :: (store 2)
%81 = OILL64 %81, 7, implicit-def dead %cc
- %82 = SLLG %81, _, 1
+ %82 = SLLG %81, %noreg, 1
%83 = LHMux %82, 0, %63 :: (load 2)
- STHMux %83, %77, 0, _ :: (store 2)
+ STHMux %83, %77, 0, %noreg :: (store 2)
%84 = LA %62, 64, %63
MVC undef %85, 0, 2, %84, 0 :: (store 2), (load 2)
- %86 = SLLG %70, _, 1
+ %86 = SLLG %70, %noreg, 1
%87 = LHMux %86, 64, %63 :: (load 2)
- %88 = SLLG %67, _, 3
+ %88 = SLLG %67, %noreg, 3
%89 = LG %65, 16, %88 :: (load 8)
%90 = LA %70, 0, %67
- %91 = SLLG %90, _, 1
+ %91 = SLLG %90, %noreg, 1
STHMux %87, %89, 0, %91 :: (store 2)
%92 = LA %72, 64, %63
MVC undef %93, 0, 2, %92, 0 :: (store 2), (load 2)
@@ -332,8 +332,8 @@ body: |
bb.19:
successors: %bb.20(0x04000000), %bb.11(0x7c000000)
- %98 = LGH %7, 0, _ :: (load 2)
- %99 = LGH undef %100, 0, _ :: (load 2)
+ %98 = LGH %7, 0, %noreg :: (load 2)
+ %99 = LGH undef %100, 0, %noreg :: (load 2)
ADJCALLSTACKDOWN 0, 0
%101 = LGFR %120.subreg_l32
%102 = LGFR %18
@@ -347,7 +347,7 @@ body: |
ADJCALLSTACKDOWN 0, 0
CallBRASL @reset_coding_state, undef %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc
ADJCALLSTACKUP 0, 0
- %120 = LA %120, 1, _
+ %120 = LA %120, 1, %noreg
CGHI %120, 4, implicit-def %cc
BRC 14, 6, %bb.11, implicit killed %cc
J %bb.20
@@ -410,7 +410,7 @@ body: |
bb.30:
successors: %bb.33(0x00000001), %bb.31(0x7fffffff)
- VST64 %130, undef %117, 0, _ :: (store 8)
+ VST64 %130, undef %117, 0, %noreg :: (store 8)
CHIMux undef %118, 2, implicit-def %cc
BRC 14, 8, %bb.33, implicit killed %cc
J %bb.31
@@ -470,7 +470,7 @@ body: |
bb.44:
bb.45:
- %0 = LG undef %22, 0, _ :: (load 8)
+ %0 = LG undef %22, 0, %noreg :: (load 8)
%38 = LHIMux 0
STRL %38, @bi_pred_me :: (store 4)
%120 = LGHI 0
diff --git a/test/CodeGen/SystemZ/fp-cmp-07.mir b/test/CodeGen/SystemZ/fp-cmp-07.mir
index 1d766fe300a..2551d19b53b 100644
--- a/test/CodeGen/SystemZ/fp-cmp-07.mir
+++ b/test/CodeGen/SystemZ/fp-cmp-07.mir
@@ -38,7 +38,7 @@ body: |
bb.1.store:
liveins: %f0s, %r2d
- STE %f0s, killed %r2d, 0, _ :: (store 4 into %ir.dest)
+ STE %f0s, killed %r2d, 0, %noreg :: (store 4 into %ir.dest)
Return implicit %f0s
...
diff --git a/test/CodeGen/SystemZ/fp-conv-17.mir b/test/CodeGen/SystemZ/fp-conv-17.mir
index b72213a0671..17a5fe24b1d 100644
--- a/test/CodeGen/SystemZ/fp-conv-17.mir
+++ b/test/CodeGen/SystemZ/fp-conv-17.mir
@@ -129,74 +129,74 @@ body: |
%1 = COPY %r3d
%0 = COPY %r2d
- %2 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %3 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %4 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %5 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %6 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %7 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %8 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %9 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %10 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %11 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %12 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %13 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %14 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %15 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %16 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %17 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- %18 = LE %1, 0, _ :: (volatile load 4 from %ir.ptr2)
- STE %2, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %3, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %4, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %5, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %6, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %7, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %8, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %9, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %10, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %11, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %12, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %13, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %14, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %15, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %16, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %17, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
- STE %18, %1, 0, _ :: (volatile store 4 into %ir.ptr2)
+ %2 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %3 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %4 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %5 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %6 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %7 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %8 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %9 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %10 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %11 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %12 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %13 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %14 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %15 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %16 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %17 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ %18 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
+ STE %2, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %3, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %4, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %5, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %6, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %7, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %8, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %9, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %10, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %11, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %12, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %13, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %14, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %15, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %16, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %17, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ STE %18, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
%19 = LDEBR %2
- STD %19, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %19, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%20 = LDEBR %3
- STD %20, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %20, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%21 = LDEBR %4
- STD %21, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %21, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%22 = LDEBR %5
- STD %22, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %22, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%23 = LDEBR %6
- STD %23, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %23, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%24 = LDEBR %7
- STD %24, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %24, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%25 = LDEBR %8
- STD %25, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %25, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%26 = LDEBR %9
- STD %26, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %26, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%27 = LDEBR %10
- STD %27, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %27, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%28 = LDEBR %11
- STD %28, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %28, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%29 = LDEBR %12
- STD %29, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %29, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%30 = LDEBR %13
- STD %30, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %30, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%31 = LDEBR %14
- STD %31, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %31, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%32 = LDEBR %15
- STD %32, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %32, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%33 = LDEBR %16
- STD %33, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %33, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%34 = LDEBR %17
- STD %34, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %34, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
%35 = LDEBR %18
- STD %35, %0, 0, _ :: (volatile store 8 into %ir.ptr1)
+ STD %35, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
Return
...
diff --git a/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
index 8798fcecfc3..2af8b3cce6f 100644
--- a/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
+++ b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
@@ -29,6 +29,6 @@ body: |
%0.subreg_hl32 = COPY %0.subreg_l32
%1 = COPY %0.subreg_l64
%2 = LARL @g_167
- STC %1.subreg_l32, %2, 8, _
+ STC %1.subreg_l32, %2, 8, %noreg
...
diff --git a/test/CodeGen/Thumb/machine-cse-physreg.mir b/test/CodeGen/Thumb/machine-cse-physreg.mir
index 5206e89cf77..0e7bb6f63ab 100644
--- a/test/CodeGen/Thumb/machine-cse-physreg.mir
+++ b/test/CodeGen/Thumb/machine-cse-physreg.mir
@@ -21,15 +21,15 @@ body: |
bb.0:
liveins: %r0
%0 = COPY %r0
- %1, %cpsr = tLSLri %0, 2, 14, _
- tCMPi8 %0, 5, 14, _, implicit-def %cpsr
+ %1, %cpsr = tLSLri %0, 2, 14, %noreg
+ tCMPi8 %0, 5, 14, %noreg, implicit-def %cpsr
tBcc %bb.8, 8, %cpsr
bb.1:
- %2, %cpsr = tLSLri %0, 2, 14, _
+ %2, %cpsr = tLSLri %0, 2, 14, %noreg
bb.8:
liveins: %cpsr
%3 = COPY %cpsr
- tSTRi killed %3, %0, 0, 14, _
+ tSTRi killed %3, %0, 0, 14, %noreg
...
diff --git a/test/CodeGen/Thumb/tbb-reuse.mir b/test/CodeGen/Thumb/tbb-reuse.mir
index 7d15c7c3ca7..e84f32b6e85 100644
--- a/test/CodeGen/Thumb/tbb-reuse.mir
+++ b/test/CodeGen/Thumb/tbb-reuse.mir
@@ -108,44 +108,44 @@ body: |
successors: %bb.2.default(0x19999998), %bb.1.entry(0x66666668)
liveins: %r0, %r7, %lr
- frame-setup tPUSH 14, _, killed %r7, killed %lr, implicit-def %sp, implicit %sp
+ frame-setup tPUSH 14, %noreg, killed %r7, killed %lr, implicit-def %sp, implicit %sp
frame-setup CFI_INSTRUCTION def_cfa_offset 8
frame-setup CFI_INSTRUCTION offset %lr, -4
frame-setup CFI_INSTRUCTION offset %r7, -8
- %r1, dead %cpsr = tSUBi3 %r0, 1, 14, _
- tCMPi8 %r1, 3, 14, _, implicit-def %cpsr
+ %r1, dead %cpsr = tSUBi3 %r0, 1, 14, %noreg
+ tCMPi8 %r1, 3, 14, %noreg, implicit-def %cpsr
tBcc %bb.2.default, 8, killed %cpsr
bb.1.entry:
successors: %bb.3.lab1(0x20000000), %bb.4.lab2(0x20000000), %bb.5.lab3(0x20000000), %bb.6.lab4(0x20000000)
liveins: %r0, %r1
- %r1, dead %cpsr = tLSLri killed %r1, 2, 14, _
- %r2 = tLEApcrelJT %jump-table.0, 14, _
- %r2 = tLDRr killed %r1, killed %r2, 14, _ :: (load 4 from jump-table)
- %r1, dead %cpsr = tLSLri %r2, 2, 14, _
+ %r1, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg
+ %r2 = tLEApcrelJT %jump-table.0, 14, %noreg
+ %r2 = tLDRr killed %r1, killed %r2, 14, %noreg :: (load 4 from jump-table)
+ %r1, dead %cpsr = tLSLri %r2, 2, 14, %noreg
tBR_JTr killed %r2, %jump-table.0
bb.2.default:
- tBL 14, _, @exit0, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, %noreg, @exit0, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
bb.3.lab1:
liveins: %r0,%r1
- tBL 14, _, @exit1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp
- tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, %noreg, @exit1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp
+ tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
bb.4.lab2:
- tBL 14, _, @exit2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, %noreg, @exit2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
bb.5.lab3:
- tBL 14, _, @exit3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, %noreg, @exit3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
bb.6.lab4:
- tBL 14, _, @exit4, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, %noreg, @exit4, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
...
diff --git a/test/CodeGen/Thumb2/bicbfi.ll b/test/CodeGen/Thumb2/bicbfi.ll
index fcdb1225db5..5f51f8c46f8 100644
--- a/test/CodeGen/Thumb2/bicbfi.ll
+++ b/test/CodeGen/Thumb2/bicbfi.ll
@@ -14,4 +14,4 @@ define void @f(i32* nocapture %b, i32* nocapture %c, i32 %a) {
%5 = add nsw i32 %4, %3
store i32 %5, i32* %b, align 4
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir b/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir
index a44604372e6..5ba1fc174fe 100644
--- a/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir
+++ b/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir
@@ -6,23 +6,23 @@ body: |
successors: %bb.2, %bb.1
liveins: %d0, %r0, %r1
- t2CMPri killed %r1, 0, 14, _, implicit-def %cpsr
+ t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr
t2Bcc %bb.2, 0, killed %cpsr
bb.1:
liveins: %d0, %r0
- %d16 = VDUP32d killed %r0, 14, _
+ %d16 = VDUP32d killed %r0, 14, %noreg
; Verify that the neon instructions haven't been conditionalized:
; CHECK-LABEL: NeonVdupMul
; CHECK: vdup.32
; CHECK: vmul.i32
- %d0 = VMULv2i32 killed %d16, killed %d0, 14, _
+ %d0 = VMULv2i32 killed %d16, killed %d0, 14, %noreg
bb.2:
liveins: %d0
- tBX_RET 14, _, implicit %d0
+ tBX_RET 14, %noreg, implicit %d0
...
---
@@ -32,23 +32,23 @@ body: |
successors: %bb.1, %bb.2
liveins: %r0, %r1
- t2CMPri killed %r1, 0, 14, _, implicit-def %cpsr
+ t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr
t2Bcc %bb.2, 1, killed %cpsr
bb.1:
- %d0 = VMOVv2i32 0, 14, _
- tBX_RET 14, _, implicit %d0
+ %d0 = VMOVv2i32 0, 14, %noreg
+ tBX_RET 14, %noreg, implicit %d0
bb.2:
liveins: %r0
- %d0 = VLDRD killed %r0, 0, 14, _
+ %d0 = VLDRD killed %r0, 0, 14, %noreg
; Verify that the neon instruction VMOVv2i32 hasn't been conditionalized,
; but the VLDR instruction that is available both in the VFP and Advanced
; SIMD extensions has.
; CHECK-LABEL: NeonVmovVfpLdr
; CHECK-DAG: vmov.i32 d0, #0x0
; CHECK-DAG: vldr{{ne|eq}} d0, [r0]
- tBX_RET 14, _, implicit %d0
+ tBX_RET 14, %noreg, implicit %d0
...
diff --git a/test/CodeGen/Thumb2/tbb-removeadd.mir b/test/CodeGen/Thumb2/tbb-removeadd.mir
index 10606679134..9798401d9df 100644
--- a/test/CodeGen/Thumb2/tbb-removeadd.mir
+++ b/test/CodeGen/Thumb2/tbb-removeadd.mir
@@ -77,47 +77,47 @@ body: |
successors: %bb.6.sw.epilog(0x0ccccccb), %bb.1.entry(0x73333335)
liveins: %r0, %r1
- tCMPi8 %r0, 4, 14, _, implicit-def %cpsr
+ tCMPi8 %r0, 4, 14, %noreg, implicit-def %cpsr
t2Bcc %bb.6.sw.epilog, 8, killed %cpsr
bb.1.entry:
successors: %bb.2.sw.bb(0x1c71c71c), %bb.3.sw.bb1(0x1c71c71c), %bb.5.sw.epilog.sink.split(0x1c71c71c), %bb.6.sw.epilog(0x0e38e38e), %bb.4.sw.bb3(0x1c71c71c)
liveins: %r0, %r1
- %r2 = t2LEApcrelJT %jump-table.0, 14, _
- %r3 = t2ADDrs killed %r2, %r0, 18, 14, _, _
- %r2, dead %cpsr = tMOVi8 1, 14, _
+ %r2 = t2LEApcrelJT %jump-table.0, 14, %noreg
+ %r3 = t2ADDrs killed %r2, %r0, 18, 14, %noreg, %noreg
+ %r2, dead %cpsr = tMOVi8 1, 14, %noreg
t2BR_JT killed %r3, killed %r0, %jump-table.0
bb.2.sw.bb:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
liveins: %r1
- %r2, dead %cpsr = tMOVi8 0, 14, _
- t2B %bb.5.sw.epilog.sink.split, 14, _
+ %r2, dead %cpsr = tMOVi8 0, 14, %noreg
+ t2B %bb.5.sw.epilog.sink.split, 14, %noreg
bb.3.sw.bb1:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
liveins: %r1
- %r0, dead %cpsr = tMOVi8 0, 14, _
- %r2, dead %cpsr = tMOVi8 1, 14, _
- tSTRi killed %r0, %r1, 0, 14, _ :: (store 4 into %ir.p)
- t2B %bb.5.sw.epilog.sink.split, 14, _
+ %r0, dead %cpsr = tMOVi8 0, 14, %noreg
+ %r2, dead %cpsr = tMOVi8 1, 14, %noreg
+ tSTRi killed %r0, %r1, 0, 14, %noreg :: (store 4 into %ir.p)
+ t2B %bb.5.sw.epilog.sink.split, 14, %noreg
bb.4.sw.bb3:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
liveins: %r1
- %r2, dead %cpsr = tMOVi8 2, 14, _
+ %r2, dead %cpsr = tMOVi8 2, 14, %noreg
bb.5.sw.epilog.sink.split:
successors: %bb.6.sw.epilog(0x80000000)
liveins: %r1, %r2
- tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.p)
+ tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.p)
bb.6.sw.epilog:
- tBX_RET 14, _
+ tBX_RET 14, %noreg
...
diff --git a/test/CodeGen/X86/GlobalISel/select-GV.mir b/test/CodeGen/X86/GlobalISel/select-GV.mir
index 7de74269ce7..0248ca28959 100644
--- a/test/CodeGen/X86/GlobalISel/select-GV.mir
+++ b/test/CodeGen/X86/GlobalISel/select-GV.mir
@@ -41,23 +41,23 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
# X64: %0:gr64 = IMPLICIT_DEF
-# X64-NEXT: %1:gr64 = LEA64r _, 1, _, @g_int, _
-# X64-NEXT: MOV64mr %0, 1, _, 0, _, %1 :: (store 8 into `i32** undef`)
+# X64-NEXT: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
+# X64-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
# X64-NEXT: RET 0
#
# X64_DARWIN_PIC: %0:gr64 = IMPLICIT_DEF
-# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r %rip, 1, _, @g_int, _
-# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, _, 0, _, %1 :: (store 8 into `i32** undef`)
+# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
+# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
# X64_DARWIN_PIC-NEXT: RET 0
#
# X32: %0:gr32 = IMPLICIT_DEF
-# X32-NEXT: %1:gr32 = LEA32r _, 1, _, @g_int, _
-# X32-NEXT: MOV32mr %0, 1, _, 0, _, %1 :: (store 8 into `i32** undef`)
+# X32-NEXT: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
+# X32-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
# X32-NEXT: RET 0
#
# X32ABI: %0:low32_addr_access = IMPLICIT_DEF
-# X32ABI-NEXT: %1:gr32 = LEA64_32r _, 1, _, @g_int, _
-# X32ABI-NEXT: MOV32mr %0, 1, _, 0, _, %1 :: (store 8 into `i32** undef`)
+# X32ABI-NEXT: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
+# X32ABI-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
# X32ABI-NEXT: RET 0
body: |
bb.1.entry:
@@ -85,23 +85,23 @@ regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# X64: %1:gr64 = LEA64r _, 1, _, @g_int, _
-# X64-NEXT: %0:gr32 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X64: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
+# X64-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
# X64-NEXT: %eax = COPY %0
# X64-NEXT: RET 0, implicit %eax
#
-# X64_DARWIN_PIC: %1:gr64 = LEA64r %rip, 1, _, @g_int, _
-# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X64_DARWIN_PIC: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
+# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
# X64_DARWIN_PIC-NEXT: %eax = COPY %0
# X64_DARWIN_PIC-NEXT: RET 0, implicit %eax
#
-# X32: %1:gr32 = LEA32r _, 1, _, @g_int, _
-# X32-NEXT: %0:gr32 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X32: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
+# X32-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
# X32-NEXT: %eax = COPY %0
# X32-NEXT: RET 0, implicit %eax
#
-# X32ABI: %1:gr32 = LEA64_32r _, 1, _, @g_int, _
-# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, _, 0, _ :: (load 4 from @g_int)
+# X32ABI: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
+# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
# X32ABI-NEXT: %eax = COPY %0
# X32ABI-NEXT: RET 0, implicit %eax
body: |
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
index 5dffc33e9dd..b083288781c 100644
--- a/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -177,7 +177,7 @@ body: |
; CHECK-LABEL: name: main
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 0
- ; CHECK: MOV64mr [[COPY]], 1, _, 0, _, [[MOV64ri32_]] :: (store 8 into %ir.data)
+ ; CHECK: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[MOV64ri32_]] :: (store 8 into %ir.data)
; CHECK: RET 0
%0(p0) = COPY %rdi
%1(p0) = G_CONSTANT i64 0
diff --git a/test/CodeGen/X86/GlobalISel/select-fconstant.mir b/test/CodeGen/X86/GlobalISel/select-fconstant.mir
index 4e8f3daad2e..8855d2be68c 100644
--- a/test/CodeGen/X86/GlobalISel/select-fconstant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-fconstant.mir
@@ -25,16 +25,16 @@ regBankSelected: true
tracksRegLiveness: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
-# CHECK_SMALL64: %0:fr32 = MOVSSrm %rip, 1, _, %const.0, _
+# CHECK_SMALL64: %0:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg
# CHECK_SMALL64-NEXT: %xmm0 = COPY %0
# CHECK_SMALL64-NEXT: RET 0, implicit %xmm0
#
# CHECK_LARGE64: %1:gr64 = MOV64ri %const.0
-# CHECK_LARGE64-NEXT: %0:fr32 = MOVSSrm %1, 1, _, 0, _ :: (load 8 from constant-pool, align 32)
+# CHECK_LARGE64-NEXT: %0:fr32 = MOVSSrm %1, 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 32)
# CHECK_LARGE64-NEXT: %xmm0 = COPY %0
# CHECK_LARGE64-NEXT: RET 0, implicit %xmm0
#
-# CHECK32: %0:fr32 = MOVSSrm _, 1, _, %const.0, _
+# CHECK32: %0:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg
# CHECK32-NEXT: %xmm0 = COPY %0
# CHECK32-NEXT: RET 0, implicit %xmm0
body: |
@@ -64,16 +64,16 @@ tracksRegLiveness: true
# CHECK32-NEXT: - { id: 0, class: fr64, preferred-register: '' }
registers:
- { id: 0, class: vecr, preferred-register: '' }
-# CHECK_SMALL64: %0:fr64 = MOVSDrm %rip, 1, _, %const.0, _
+# CHECK_SMALL64: %0:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg
# CHECK_SMALL64-NEXT: %xmm0 = COPY %0
# CHECK_SMALL64-NEXT: RET 0, implicit %xmm0
#
# CHECK_LARGE64: %1:gr64 = MOV64ri %const.0
-# CHECK_LARGE64-NEXT: %0:fr64 = MOVSDrm %1, 1, _, 0, _ :: (load 8 from constant-pool, align 64)
+# CHECK_LARGE64-NEXT: %0:fr64 = MOVSDrm %1, 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 64)
# CHECK_LARGE64-NEXT: %xmm0 = COPY %0
# CHECK_LARGE64-NEXT: RET 0, implicit %xmm0
#
-# CHECK32: %0:fr64 = MOVSDrm _, 1, _, %const.0, _
+# CHECK32: %0:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg
# CHECK32-NEXT: %xmm0 = COPY %0
# CHECK32-NEXT: RET 0, implicit %xmm0
body: |
diff --git a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
index 1d641ba279a..5d0573ecc49 100644
--- a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
+++ b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
@@ -24,9 +24,9 @@ registers:
stack:
- { id: 0, name: ptr1, offset: 0, size: 4, alignment: 4 }
-# CHECK-X32: %0 = LEA32r %stack.0.ptr1, 1, _, 0, _
-# CHECK-X32ABI: %0 = LEA64_32r %stack.0.ptr1, 1, _, 0, _
-# CHECK-X64: %0 = LEA64r %stack.0.ptr1, 1, _, 0, _
+# CHECK-X32: %0 = LEA32r %stack.0.ptr1, 1, %noreg, 0, %noreg
+# CHECK-X32ABI: %0 = LEA64_32r %stack.0.ptr1, 1, %noreg, 0, %noreg
+# CHECK-X64: %0 = LEA64r %stack.0.ptr1, 1, %noreg, 0, %noreg
body: |
bb.1 (%ir-block.0):
%0(p0) = G_FRAME_INDEX %stack.0.ptr1
diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir
index e66b25afc14..b78afd2803a 100644
--- a/test/CodeGen/X86/GlobalISel/select-gep.mir
+++ b/test/CodeGen/X86/GlobalISel/select-gep.mir
@@ -24,7 +24,7 @@ body: |
; CHECK-LABEL: name: test_gep_i32
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64_nosp = MOV64ri32 20
- ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, _
+ ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, %noreg
; CHECK: %rax = COPY [[LEA64r]]
; CHECK: RET 0, implicit %rax
%0(p0) = COPY %rdi
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index 0b7160d2bd9..804d7bce671 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -57,8 +57,8 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i8
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, _, 0, _ :: (load 1 from %ir.p1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
; ALL: %al = COPY [[MOV8rm]]
; ALL: RET 0, implicit %al
%1(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -82,8 +82,8 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i16
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, _, 0, _ :: (load 2 from %ir.p1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
; ALL: %ax = COPY [[MOV16rm]]
; ALL: RET 0, implicit %ax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -107,8 +107,8 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, _, 0, _ :: (load 4 from %ir.p1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
; ALL: %eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit %eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -134,9 +134,9 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i8
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 1 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV8mr [[MOV32rm]], 1, _, 0, _, [[MOV8rm]] :: (store 1 into %ir.p1)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV8mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
; ALL: %eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit %eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
@@ -164,9 +164,9 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i16
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 2 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV16mr [[MOV32rm]], 1, _, 0, _, [[MOV16rm]] :: (store 2 into %ir.p1)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV16mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
; ALL: %eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit %eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
@@ -194,9 +194,9 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm1]], 1, _, 0, _, [[MOV32rm]] :: (store 4 into %ir.p1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm1]], 1, %noreg, 0, %noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
; ALL: %eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit %eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
@@ -222,8 +222,8 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, _, 0, _ :: (load 4 from %ir.ptr1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr1)
; ALL: %eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit %eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -249,9 +249,9 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm]], 1, _, 0, _, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
; ALL: RET 0
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir b/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index 6e85fb9ed9b..35e1659a53c 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -110,7 +110,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr8 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.p1)
+# ALL: %1:gr8 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
# ALL: %al = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -134,7 +134,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr16 = MOV16rm %0, 1, _, 0, _ :: (load 2 from %ir.p1)
+# ALL: %1:gr16 = MOV16rm %0, 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
# ALL: %ax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -158,7 +158,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr32 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# ALL: %1:gr32 = MOV32rm %0, 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
# ALL: %eax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -182,7 +182,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr64 = MOV64rm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# ALL: %1:gr64 = MOV64rm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -206,7 +206,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr32 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# ALL: %1:gr32 = MOV32rm %0, 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -228,9 +228,9 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:fr32 = MOVSSrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
-# AVX: %1:fr32 = VMOVSSrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
-# AVX512ALL: %1:fr32x = VMOVSSZrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# SSE: %1:fr32 = MOVSSrm %0, 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
+# AVX: %1:fr32 = VMOVSSrm %0, 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
+# AVX512ALL: %1:fr32x = VMOVSSZrm %0, 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -254,7 +254,7 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
-# ALL: %1:gr64 = MOV64rm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# ALL: %1:gr64 = MOV64rm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -276,9 +276,9 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:fr64 = MOVSDrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
-# AVX: %1:fr64 = VMOVSDrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
-# AVX512ALL: %1:fr64x = VMOVSDZrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# SSE: %1:fr64 = MOVSDrm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
+# AVX: %1:fr64 = VMOVSDrm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
+# AVX512ALL: %1:fr64x = VMOVSDZrm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -303,7 +303,7 @@ registers:
- { id: 1, class: gpr }
# ALL: %0:gr32 = COPY %edi
# ALL: %1:gr64 = COPY %rsi
-# ALL: MOV32mr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# ALL: MOV32mr %1, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -329,7 +329,7 @@ registers:
- { id: 1, class: gpr }
# ALL: %0:gr64 = COPY %rdi
# ALL: %1:gr64 = COPY %rsi
-# ALL: MOV64mr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# ALL: MOV64mr %1, 1, %noreg, 0, %noreg, %0 :: (store 8 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -356,7 +356,7 @@ registers:
# AVX512ALL: %0:fr32x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
# ALL: %2:gr32 = COPY %0
-# ALL: MOV32mr %1, 1, _, 0, _, %2 :: (store 4 into %ir.p1)
+# ALL: MOV32mr %1, 1, %noreg, 0, %noreg, %2 :: (store 4 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -382,9 +382,9 @@ registers:
# NO_AVX512F: %0:fr32 = COPY %xmm0
# AVX512ALL: %0:fr32x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVSSmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
-# AVX: VMOVSSmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
-# AVX512ALL: VMOVSSZmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# SSE: MOVSSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p1)
+# AVX: VMOVSSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p1)
+# AVX512ALL: VMOVSSZmr %1, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -411,7 +411,7 @@ registers:
# AVX512ALL: %0:fr64x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
# ALL: %2:gr64 = COPY %0
-# ALL: MOV64mr %1, 1, _, 0, _, %2 :: (store 8 into %ir.p1)
+# ALL: MOV64mr %1, 1, %noreg, 0, %noreg, %2 :: (store 8 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -437,9 +437,9 @@ registers:
# NO_AVX512F: %0:fr64 = COPY %xmm0
# AVX512ALL: %0:fr64x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVSDmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
-# AVX: VMOVSDmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
-# AVX512ALL: VMOVSDZmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# SSE: MOVSDmr %1, 1, %noreg, 0, %noreg, %0 :: (store 8 into %ir.p1)
+# AVX: VMOVSDmr %1, 1, %noreg, 0, %noreg, %0 :: (store 8 into %ir.p1)
+# AVX512ALL: VMOVSDZmr %1, 1, %noreg, 0, %noreg, %0 :: (store 8 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -464,7 +464,7 @@ registers:
# ALL: - { id: 1, class: gr64, preferred-register: '' }
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %1:gr64 = MOV64rm %0, 1, _, 0, _ :: (load 8 from %ir.ptr1)
+# ALL: %1:gr64 = MOV64rm %0, 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
body: |
bb.1 (%ir-block.0):
liveins: %rdi
@@ -487,7 +487,7 @@ registers:
# ALL: - { id: 1, class: gr64, preferred-register: '' }
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: MOV64mr %0, 1, _, 0, _, %1 :: (store 8 into %ir.ptr1)
+# ALL: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into %ir.ptr1)
body: |
bb.1 (%ir-block.0):
liveins: %rdi, %rsi
@@ -518,8 +518,8 @@ registers:
- { id: 4, class: gpr }
# ALL: %0:gr64 = COPY %rdi
# ALL-NEXT: %1:gr32 = COPY %esi
-# ALL-NEXT: MOV32mr %0, 1, _, 20, _, %1 :: (store 4 into %ir.arrayidx)
-# ALL-NEXT: %4:gr32 = MOV32rm %0, 1, _, 20, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT: MOV32mr %0, 1, %noreg, 20, %noreg, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT: %4:gr32 = MOV32rm %0, 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
# ALL-NEXT: %eax = COPY %4
# ALL-NEXT: RET 0, implicit %eax
body: |
@@ -557,9 +557,9 @@ registers:
# ALL: %0:gr64 = COPY %rdi
# ALL-NEXT: %1:gr32 = COPY %esi
# ALL-NEXT: %2:gr64_nosp = MOV64ri 228719476720
-# ALL-NEXT: %3:gr64 = LEA64r %0, 1, %2, 0, _
-# ALL-NEXT: MOV32mr %3, 1, _, 0, _, %1 :: (store 4 into %ir.arrayidx)
-# ALL-NEXT: %4:gr32 = MOV32rm %3, 1, _, 0, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT: %3:gr64 = LEA64r %0, 1, %2, 0, %noreg
+# ALL-NEXT: MOV32mr %3, 1, %noreg, 0, %noreg, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT: %4:gr32 = MOV32rm %3, 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
# ALL-NEXT: %eax = COPY %4
# ALL-NEXT: RET 0, implicit %eax
body: |
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-v128.mir b/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
index 4edab36b57c..7a3647c3e5c 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
@@ -35,10 +35,10 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX: %1:vr128 = VMOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
-# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# SSE: %1:vr128 = MOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
+# AVX: %1:vr128 = VMOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -60,10 +60,10 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX: %1:vr128 = VMOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1)
-# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# SSE: %1:vr128 = MOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
+# AVX: %1:vr128 = VMOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
+# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
+# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
# ALL: %xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -87,10 +87,10 @@ registers:
# NO_AVX512F: %0:vr128 = COPY %xmm0
# AVX512ALL: %0:vr128x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX: VMOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
-# AVX512VL: VMOVAPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# SSE: MOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
@@ -115,10 +115,10 @@ registers:
# NO_AVX512F: %0:vr128 = COPY %xmm0
# AVX512ALL: %0:vr128x = COPY %xmm0
# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX: VMOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512VL: VMOVUPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# SSE: MOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
# ALL: %rax = COPY %1
body: |
bb.1 (%ir-block.0):
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-v256.mir b/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
index 86310d25760..962201f5f54 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
@@ -43,17 +43,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, _, 0, _ :: (load 32 from %ir.p1, align 1)
+# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
# NO_AVX512F-NEXT: %ymm0 = COPY %1
# NO_AVX512F-NEXT: RET 0, implicit %ymm0
#
# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, _, 0, _ :: (load 32 from %ir.p1, align 1)
+# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
# AVX512F-NEXT: %ymm0 = COPY %1
# AVX512F-NEXT: RET 0, implicit %ymm0
#
# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, _, 0, _ :: (load 32 from %ir.p1, align 1)
+# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
# AVX512VL-NEXT: %ymm0 = COPY %1
# AVX512VL-NEXT: RET 0, implicit %ymm0
body: |
@@ -76,17 +76,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, _, 0, _ :: (load 32 from %ir.p1)
+# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
# NO_AVX512F-NEXT: %ymm0 = COPY %1
# NO_AVX512F-NEXT: RET 0, implicit %ymm0
#
# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, _, 0, _ :: (load 32 from %ir.p1)
+# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
# AVX512F-NEXT: %ymm0 = COPY %1
# AVX512F-NEXT: RET 0, implicit %ymm0
#
# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, _, 0, _ :: (load 32 from %ir.p1)
+# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
# AVX512VL-NEXT: %ymm0 = COPY %1
# AVX512VL-NEXT: RET 0, implicit %ymm0
body: |
@@ -117,17 +117,17 @@ registers:
- { id: 1, class: gpr }
# NO_AVX512F: %0:vr256 = COPY %ymm0
# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1, align 1)
+# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
# NO_AVX512F-NEXT: RET 0
#
# AVX512F: %0:vr256x = COPY %ymm0
# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512F-NEXT: RET 0
#
# AVX512VL: %0:vr256x = COPY %ymm0
# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
@@ -157,17 +157,17 @@ registers:
- { id: 1, class: gpr }
# NO_AVX512F: %0:vr256 = COPY %ymm0
# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1)
+# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
# NO_AVX512F-NEXT: RET 0
#
# AVX512F: %0:vr256x = COPY %ymm0
# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1)
+# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
# AVX512F-NEXT: RET 0
#
# AVX512VL: %0:vr256x = COPY %ymm0
# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, _, 0, _, %0 :: (store 32 into %ir.p1)
+# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
diff --git a/test/CodeGen/X86/GlobalISel/select-memop-v512.mir b/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
index e1b25903f06..8be5c940eff 100644
--- a/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
+++ b/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
@@ -36,7 +36,7 @@ body: |
; AVX512F-LABEL: name: test_load_v16i32_noalign
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, _, 0, _ :: (load 64 from %ir.p1, align 1)
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 1)
; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
; AVX512F: RET 0, implicit %zmm0
%0(p0) = COPY %rdi
@@ -59,7 +59,7 @@ body: |
; AVX512F-LABEL: name: test_load_v16i32_align
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, _, 0, _ :: (load 64 from %ir.p1, align 32)
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 32)
; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
; AVX512F: RET 0, implicit %zmm0
%0(p0) = COPY %rdi
@@ -83,7 +83,7 @@ body: |
; AVX512F-LABEL: name: test_store_v16i32_noalign
; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, _, 0, _, [[COPY]] :: (store 64 into %ir.p1, align 1)
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
; AVX512F: RET 0
%0(<16 x s32>) = COPY %zmm0
%1(p0) = COPY %rdi
@@ -106,7 +106,7 @@ body: |
; AVX512F-LABEL: name: test_store_v16i32_align
; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, _, 0, _, [[COPY]] :: (store 64 into %ir.p1, align 32)
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
; AVX512F: RET 0
%0(<16 x s32>) = COPY %zmm0
%1(p0) = COPY %rdi
diff --git a/test/CodeGen/X86/block-placement.mir b/test/CodeGen/X86/block-placement.mir
index c0cd7057d5c..61af79d16f5 100644
--- a/test/CodeGen/X86/block-placement.mir
+++ b/test/CodeGen/X86/block-placement.mir
@@ -46,7 +46,7 @@ liveins:
- { reg: '%rdi' }
- { reg: '%esi' }
-# CHECK: %eax = FAULTING_OP 1, %bb.3.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+# CHECK: %eax = FAULTING_OP 1, %bb.3.null, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
# CHECK-NEXT: JMP_1 %bb.2.not_null
# CHECK: bb.3.null:
# CHECK: bb.4.right:
@@ -66,7 +66,7 @@ body: |
successors: %bb.2.null(0x7ffff800), %bb.4.not_null(0x00000800)
liveins: %rdi
- %eax = FAULTING_OP 1, %bb.2.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ %eax = FAULTING_OP 1, %bb.2.null, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
JMP_1 %bb.4.not_null
bb.4.not_null:
diff --git a/test/CodeGen/X86/conditional-tailcall-samedest.mir b/test/CodeGen/X86/conditional-tailcall-samedest.mir
index c18a98be53f..a632ba99743 100644
--- a/test/CodeGen/X86/conditional-tailcall-samedest.mir
+++ b/test/CodeGen/X86/conditional-tailcall-samedest.mir
@@ -118,7 +118,7 @@ body: |
bb.2.sw.bb:
successors: %bb.3.init.check.i(0x00000800), %bb.6.return(0x7ffff800)
- %al = ACQUIRE_MOV8rm %rip, 1, _, @static_local_guard, _ :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
+ %al = ACQUIRE_MOV8rm %rip, 1, %noreg, @static_local_guard, %noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
TEST8rr killed %al, %al, implicit-def %eflags
JNE_1 %bb.6.return, implicit killed %eflags
JMP_1 %bb.3.init.check.i
diff --git a/test/CodeGen/X86/domain-reassignment.mir b/test/CodeGen/X86/domain-reassignment.mir
index cd678d2b952..98ac2fd495f 100644
--- a/test/CodeGen/X86/domain-reassignment.mir
+++ b/test/CodeGen/X86/domain-reassignment.mir
@@ -165,7 +165,7 @@ body: |
%21 = IMPLICIT_DEF
%20 = VMOVSSZrrk %19, killed %18, killed %21, %5
%22 = COPY %20
- VMOVSSZmr %4, 1, _, 0, _, killed %22 :: (store 4 into %ir.fptr)
+ VMOVSSZmr %4, 1, %noreg, 0, %noreg, killed %22 :: (store 4 into %ir.fptr)
RET 0
...
@@ -261,7 +261,7 @@ body: |
%9 = INSERT_SUBREG %8, %18, 1
%10 = COPY %9
%11 = VMOVAPDZrrk %2, killed %10, %1
- VMOVAPDZmr %0, 1, _, 0, _, killed %11
+ VMOVAPDZmr %0, 1, %noreg, 0, %noreg, killed %11
; CHECK: KTESTBrr %18, %18, implicit-def %eflags
TEST8rr %18, %18, implicit-def %eflags
@@ -363,7 +363,7 @@ body: |
%9 = INSERT_SUBREG %8, %17, 3
%10 = COPY %9
%11 = VMOVAPSZrrk %2, killed %10, %1
- VMOVAPSZmr %0, 1, _, 0, _, killed %11
+ VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %11
; CHECK: KTESTWrr %17, %17, implicit-def %eflags
TEST16rr %17, %17, implicit-def %eflags
@@ -432,7 +432,7 @@ body: |
%1 = COPY %zmm0
%2 = COPY %zmm1
- ; CHECK: %5:vk32 = KMOVDkm %0, 1, _, 0, _
+ ; CHECK: %5:vk32 = KMOVDkm %0, 1, %noreg, 0, %noreg
; CHECK: %6:vk32 = KSHIFTRDri %5, 2
; CHECK: %7:vk32 = KSHIFTLDri %6, 1
; CHECK: %8:vk32 = KNOTDrr %7
@@ -441,7 +441,7 @@ body: |
; CHECK: %11:vk32 = KXORDrr %10, %6
; CHECK: %12:vk32 = KANDNDrr %11, %9
; CHECK: %13:vk32 = KADDDrr %12, %11
- %5 = MOV32rm %0, 1, _, 0, _
+ %5 = MOV32rm %0, 1, %noreg, 0, %noreg
%6 = SHR32ri %5, 2, implicit-def dead %eflags
%7 = SHL32ri %6, 1, implicit-def dead %eflags
%8 = NOT32r %7
@@ -454,7 +454,7 @@ body: |
; CHECK: %3:vk32wm = COPY %13
%3 = COPY %13
%4 = VMOVDQU16Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, _, 0, _, killed %4
+ VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
; CHECK: KTESTDrr %13, %13, implicit-def %eflags
TEST32rr %13, %13, implicit-def %eflags
@@ -523,7 +523,7 @@ body: |
%1 = COPY %zmm0
%2 = COPY %zmm1
- ; CHECK: %5:vk64 = KMOVQkm %0, 1, _, 0, _
+ ; CHECK: %5:vk64 = KMOVQkm %0, 1, %noreg, 0, %noreg
; CHECK: %6:vk64 = KSHIFTRQri %5, 2
; CHECK: %7:vk64 = KSHIFTLQri %6, 1
; CHECK: %8:vk64 = KNOTQrr %7
@@ -532,7 +532,7 @@ body: |
; CHECK: %11:vk64 = KXORQrr %10, %6
; CHECK: %12:vk64 = KANDNQrr %11, %9
; CHECK: %13:vk64 = KADDQrr %12, %11
- %5 = MOV64rm %0, 1, _, 0, _
+ %5 = MOV64rm %0, 1, %noreg, 0, %noreg
%6 = SHR64ri %5, 2, implicit-def dead %eflags
%7 = SHL64ri %6, 1, implicit-def dead %eflags
%8 = NOT64r %7
@@ -545,7 +545,7 @@ body: |
; CHECK: %3:vk64wm = COPY %13
%3 = COPY %13
%4 = VMOVDQU8Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, _, 0, _, killed %4
+ VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
; CHECK: KTESTQrr %13, %13, implicit-def %eflags
TEST64rr %13, %13, implicit-def %eflags
@@ -607,16 +607,16 @@ body: |
%1 = COPY %zmm0
%2 = COPY %zmm1
- ; CHECK: %7:vk8 = KMOVBkm %0, 1, _, 0, _
+ ; CHECK: %7:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
; CHECK: %5:vk16 = COPY %7
; CHECK: %6:vk16 = KNOTWrr %5
- %5 = MOVZX16rm8 %0, 1, _, 0, _
+ %5 = MOVZX16rm8 %0, 1, %noreg, 0, %noreg
%6 = NOT16r %5
; CHECK: %3:vk16wm = COPY %6
%3 = COPY %6
%4 = VMOVAPSZrrk %2, killed %3, %1
- VMOVAPSZmr %0, 1, _, 0, _, killed %4
+ VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %4
RET 0
...
@@ -670,19 +670,19 @@ body: |
%1 = COPY %zmm0
%2 = COPY %zmm1
- ; CHECK: %8:vk8 = KMOVBkm %0, 1, _, 0, _
+ ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
; CHECK: %5:vk32 = COPY %8
- ; CHECK: %9:vk16 = KMOVWkm %0, 1, _, 0, _
+ ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
; CHECK: %6:vk32 = COPY %9
; CHECK: %7:vk32 = KADDDrr %5, %6
- %5 = MOVZX32rm8 %0, 1, _, 0, _
- %6 = MOVZX32rm16 %0, 1, _, 0, _
+ %5 = MOVZX32rm8 %0, 1, %noreg, 0, %noreg
+ %6 = MOVZX32rm16 %0, 1, %noreg, 0, %noreg
%7 = ADD32rr %5, %6, implicit-def dead %eflags
; CHECK: %3:vk64wm = COPY %7
%3 = COPY %7
%4 = VMOVDQU16Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, _, 0, _, killed %4
+ VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
RET 0
...
@@ -736,19 +736,19 @@ body: |
%1 = COPY %zmm0
%2 = COPY %zmm1
- ; CHECK: %8:vk8 = KMOVBkm %0, 1, _, 0, _
+ ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
; CHECK: %5:vk64 = COPY %8
- ; CHECK: %9:vk16 = KMOVWkm %0, 1, _, 0, _
+ ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
; CHECK: %6:vk64 = COPY %9
; CHECK: %7:vk64 = KADDQrr %5, %6
- %5 = MOVZX64rm8 %0, 1, _, 0, _
- %6 = MOVZX64rm16 %0, 1, _, 0, _
+ %5 = MOVZX64rm8 %0, 1, %noreg, 0, %noreg
+ %6 = MOVZX64rm16 %0, 1, %noreg, 0, %noreg
%7 = ADD64rr %5, %6, implicit-def dead %eflags
; CHECK: %3:vk64wm = COPY %7
%3 = COPY %7
%4 = VMOVDQU8Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, _, 0, _, killed %4
+ VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
RET 0
...
diff --git a/test/CodeGen/X86/dynamic-alloca-lifetime.ll b/test/CodeGen/X86/dynamic-alloca-lifetime.ll
index 996eec05163..4f3238b4477 100644
--- a/test/CodeGen/X86/dynamic-alloca-lifetime.ll
+++ b/test/CodeGen/X86/dynamic-alloca-lifetime.ll
@@ -41,4 +41,4 @@ if.else130: ; preds = %bb1
declare void @bar()
attributes #0 = { nounwind }
-attributes #1 = { ssp }
\ No newline at end of file
+attributes #1 = { ssp }
diff --git a/test/CodeGen/X86/evex-to-vex-compress.mir b/test/CodeGen/X86/evex-to-vex-compress.mir
index b8bc143a6ef..6f9737de1fc 100755
--- a/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -17,878 +17,878 @@
name: evex_z256_to_vex_test
body: |
bb.0:
- ; CHECK: VMOVAPDYmr %rdi, 1, _, 0, _, %ymm0
- VMOVAPDZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVAPDYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVAPDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVAPDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVAPDYrr %ymm0
%ymm0 = VMOVAPDZ256rr %ymm0
; CHECK: %ymm0 = VMOVAPDYrr_REV %ymm0
%ymm0 = VMOVAPDZ256rr_REV %ymm0
- ; CHECK: VMOVAPSYmr %rdi, 1, _, 0, _, %ymm0
- VMOVAPSZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVAPSYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVAPSZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVAPSYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVAPSYrr %ymm0
%ymm0 = VMOVAPSZ256rr %ymm0
; CHECK: %ymm0 = VMOVAPSYrr_REV %ymm0
%ymm0 = VMOVAPSZ256rr_REV %ymm0
- ; CHECK: %ymm0 = VMOVDDUPYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMOVDDUPYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDDUPYrr %ymm0
%ymm0 = VMOVDDUPZ256rr %ymm0
- ; CHECK: VMOVDQAYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQA32Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQA32Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQAYrr %ymm0
%ymm0 = VMOVDQA32Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0
%ymm0 = VMOVDQA32Z256rr_REV %ymm0
- ; CHECK: VMOVDQAYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQA64Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQA64Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQAYrr %ymm0
%ymm0 = VMOVDQA64Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0
%ymm0 = VMOVDQA64Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQU16Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQU16Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQUYrr %ymm0
%ymm0 = VMOVDQU16Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
%ymm0 = VMOVDQU16Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQU32Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQU32Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQUYrr %ymm0
%ymm0 = VMOVDQU32Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
%ymm0 = VMOVDQU32Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQU64Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQU64Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQUYrr %ymm0
%ymm0 = VMOVDQU64Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
%ymm0 = VMOVDQU64Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, _, 0, _, %ymm0
- VMOVDQU8Z256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVDQU8Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVDQUYrr %ymm0
%ymm0 = VMOVDQU8Z256rr %ymm0
; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
%ymm0 = VMOVDQU8Z256rr_REV %ymm0
- ; CHECK: %ymm0 = VMOVNTDQAYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVNTDQAZ256rm %rip, 1, _, %rax, _
- ; CHECK: VMOVNTDQYmr %rdi, 1, _, 0, _, %ymm0
- VMOVNTDQZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: VMOVNTPDYmr %rdi, 1, _, 0, _, %ymm0
- VMOVNTPDZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: VMOVNTPSYmr %rdi, 1, _, 0, _, %ymm0
- VMOVNTPSZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVSHDUPYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVSHDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMOVNTDQAYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: VMOVNTDQYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: VMOVNTPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: VMOVNTPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVSHDUPYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVSHDUPYrr %ymm0
%ymm0 = VMOVSHDUPZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVSLDUPYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVSLDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMOVSLDUPYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVSLDUPYrr %ymm0
%ymm0 = VMOVSLDUPZ256rr %ymm0
- ; CHECK: VMOVUPDYmr %rdi, 1, _, 0, _, %ymm0
- VMOVUPDZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VMOVUPDYrm %rip, 1, _, %rax, _
- %ymm0 = VMOVUPDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VMOVUPDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMOVUPDYrr %ymm0
%ymm0 = VMOVUPDZ256rr %ymm0
; CHECK: %ymm0 = VMOVUPDYrr_REV %ymm0
%ymm0 = VMOVUPDZ256rr_REV %ymm0
- ; CHECK: VMOVUPSYmr %rdi, 1, _, 0, _, %ymm0
- VMOVUPSZ256mr %rdi, 1, _, 0, _, %ymm0
- ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPANDDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
+ ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPANDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1
%ymm0 = VPANDDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPANDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPANDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1
%ymm0 = VPANDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPANDNDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPANDNDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
%ymm0 = VPANDNDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPANDNQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPANDNQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
%ymm0 = VPANDNQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPAVGBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPAVGBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPAVGBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPAVGBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPAVGBYrr %ymm0, %ymm1
%ymm0 = VPAVGBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPAVGWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPAVGWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPAVGWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPAVGWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPAVGWYrr %ymm0, %ymm1
%ymm0 = VPAVGWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDBYrr %ymm0, %ymm1
%ymm0 = VPADDBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDDYrr %ymm0, %ymm1
%ymm0 = VPADDDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDQYrr %ymm0, %ymm1
%ymm0 = VPADDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDSBYrr %ymm0, %ymm1
%ymm0 = VPADDSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDSWYrr %ymm0, %ymm1
%ymm0 = VPADDSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDUSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDUSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDUSBYrr %ymm0, %ymm1
%ymm0 = VPADDUSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDUSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDUSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDUSWYrr %ymm0, %ymm1
%ymm0 = VPADDUSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPADDWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPADDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPADDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPADDWYrr %ymm0, %ymm1
%ymm0 = VPADDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMULPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMULPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMULPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMULPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMULPDYrr %ymm0, %ymm1
%ymm0 = VMULPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMULPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMULPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMULPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMULPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMULPSYrr %ymm0, %ymm1
%ymm0 = VMULPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VORPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VORPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VORPDYrr %ymm0, %ymm1
%ymm0 = VORPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VORPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VORPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VORPSYrr %ymm0, %ymm1
%ymm0 = VORPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMADDUBSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMADDUBSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMADDUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMADDUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMADDUBSWYrr %ymm0, %ymm1
%ymm0 = VPMADDUBSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMADDWDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMADDWDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMADDWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMADDWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMADDWDYrr %ymm0, %ymm1
%ymm0 = VPMADDWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXSBYrr %ymm0, %ymm1
%ymm0 = VPMAXSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXSDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXSDYrr %ymm0, %ymm1
%ymm0 = VPMAXSDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXSWYrr %ymm0, %ymm1
%ymm0 = VPMAXSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXUBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXUBYrr %ymm0, %ymm1
%ymm0 = VPMAXUBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXUDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXUDYrr %ymm0, %ymm1
%ymm0 = VPMAXUDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMAXUWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMAXUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMAXUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMAXUWYrr %ymm0, %ymm1
%ymm0 = VPMAXUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINSBYrr %ymm0, %ymm1
%ymm0 = VPMINSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINSDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINSDYrr %ymm0, %ymm1
%ymm0 = VPMINSDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINSWYrr %ymm0, %ymm1
%ymm0 = VPMINSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINUBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINUBYrr %ymm0, %ymm1
%ymm0 = VPMINUBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINUDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINUDYrr %ymm0, %ymm1
%ymm0 = VPMINUDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMINUWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMINUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMINUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMINUWYrr %ymm0, %ymm1
%ymm0 = VPMINUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULDQYrr %ymm0, %ymm1
%ymm0 = VPMULDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHRSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULHRSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULHRSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULHRSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULHRSWYrr %ymm0, %ymm1
%ymm0 = VPMULHRSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHUWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULHUWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULHUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULHUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULHUWYrr %ymm0, %ymm1
%ymm0 = VPMULHUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULHWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULHWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULHWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULHWYrr %ymm0, %ymm1
%ymm0 = VPMULHWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULLDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULLDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULLDYrr %ymm0, %ymm1
%ymm0 = VPMULLDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULLWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULLWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULLWYrr %ymm0, %ymm1
%ymm0 = VPMULLWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULUDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPMULUDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMULUDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMULUDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMULUDQYrr %ymm0, %ymm1
%ymm0 = VPMULUDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPORDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1
%ymm0 = VPORDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPORQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1
%ymm0 = VPORQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBBYrr %ymm0, %ymm1
%ymm0 = VPSUBBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBDYrr %ymm0, %ymm1
%ymm0 = VPSUBDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBQYrr %ymm0, %ymm1
%ymm0 = VPSUBQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBSBYrr %ymm0, %ymm1
%ymm0 = VPSUBSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBSWYrr %ymm0, %ymm1
%ymm0 = VPSUBSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBUSBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBUSBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBUSBYrr %ymm0, %ymm1
%ymm0 = VPSUBUSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBUSWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBUSWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBUSWYrr %ymm0, %ymm1
%ymm0 = VPSUBUSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSUBWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSUBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSUBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSUBWYrr %ymm0, %ymm1
%ymm0 = VPSUBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPXORDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPXORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1
%ymm0 = VPXORDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPXORQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPXORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1
%ymm0 = VPXORQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VADDPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VADDPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VADDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VADDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VADDPDYrr %ymm0, %ymm1
%ymm0 = VADDPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VADDPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VADDPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VADDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VADDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VADDPSYrr %ymm0, %ymm1
%ymm0 = VADDPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDNPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VANDNPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VANDNPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VANDNPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VANDNPDYrr %ymm0, %ymm1
%ymm0 = VANDNPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDNPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VANDNPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VANDNPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VANDNPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VANDNPSYrr %ymm0, %ymm1
%ymm0 = VANDNPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VANDPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VANDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VANDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VANDPDYrr %ymm0, %ymm1
%ymm0 = VANDPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VANDPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VANDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VANDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VANDPSYrr %ymm0, %ymm1
%ymm0 = VANDPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VDIVPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VDIVPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VDIVPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VDIVPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VDIVPDYrr %ymm0, %ymm1
%ymm0 = VDIVPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VDIVPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VDIVPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VDIVPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VDIVPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VDIVPSYrr %ymm0, %ymm1
%ymm0 = VDIVPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMAXCPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMAXCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1
%ymm0 = VMAXCPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMAXCPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMAXCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
%ymm0 = VMAXCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMAXPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMAXPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1
%ymm0 = VMAXPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMAXPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMAXPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
%ymm0 = VMAXPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMINCPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMINCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1
%ymm0 = VMINCPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMINCPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMINCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
%ymm0 = VMINCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMINPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMINPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1
%ymm0 = VMINPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VMINPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VMINPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
%ymm0 = VMINPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VXORPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VXORPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VXORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VXORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VXORPDYrr %ymm0, %ymm1
%ymm0 = VXORPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VXORPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VXORPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VXORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VXORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VXORPSYrr %ymm0, %ymm1
%ymm0 = VXORPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKSSDWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPACKSSDWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPACKSSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPACKSSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPACKSSDWYrr %ymm0, %ymm1
%ymm0 = VPACKSSDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKSSWBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPACKSSWBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPACKSSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPACKSSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPACKSSWBYrr %ymm0, %ymm1
%ymm0 = VPACKSSWBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKUSDWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPACKUSDWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPACKUSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPACKUSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPACKUSDWYrr %ymm0, %ymm1
%ymm0 = VPACKUSDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKUSWBYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPACKUSWBZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPACKUSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPACKUSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPACKUSWBYrr %ymm0, %ymm1
%ymm0 = VPACKUSWBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKHPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VUNPCKHPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VUNPCKHPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VUNPCKHPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VUNPCKHPDYrr %ymm0, %ymm1
%ymm0 = VUNPCKHPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKHPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VUNPCKHPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VUNPCKHPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VUNPCKHPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VUNPCKHPSYrr %ymm0, %ymm1
%ymm0 = VUNPCKHPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKLPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VUNPCKLPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VUNPCKLPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VUNPCKLPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VUNPCKLPDYrr %ymm0, %ymm1
%ymm0 = VUNPCKLPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKLPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VUNPCKLPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VUNPCKLPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VUNPCKLPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VUNPCKLPSYrr %ymm0, %ymm1
%ymm0 = VUNPCKLPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VSUBPDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VSUBPDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VSUBPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VSUBPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VSUBPDYrr %ymm0, %ymm1
%ymm0 = VSUBPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VSUBPSYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VSUBPSZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VSUBPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VSUBPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VSUBPSYrr %ymm0, %ymm1
%ymm0 = VSUBPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHBWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKHBWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKHBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKHBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKHBWYrr %ymm0, %ymm1
%ymm0 = VPUNPCKHBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKHDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKHDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKHDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKHDQYrr %ymm0, %ymm1
%ymm0 = VPUNPCKHDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHQDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKHQDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKHQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKHQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKHQDQYrr %ymm0, %ymm1
%ymm0 = VPUNPCKHQDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHWDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKHWDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKHWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKHWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKHWDYrr %ymm0, %ymm1
%ymm0 = VPUNPCKHWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLBWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKLBWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKLBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKLBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKLBWYrr %ymm0, %ymm1
%ymm0 = VPUNPCKLBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKLDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKLDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKLDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKLDQYrr %ymm0, %ymm1
%ymm0 = VPUNPCKLDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLQDQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKLQDQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKLQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKLQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKLQDQYrr %ymm0, %ymm1
%ymm0 = VPUNPCKLQDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLWDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPUNPCKLWDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPUNPCKLWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPUNPCKLWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPUNPCKLWDYrr %ymm0, %ymm1
%ymm0 = VPUNPCKLWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VFMADD132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADD231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMADDSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMADDSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMADDSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMADDSUB231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMADDSUB231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUB231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUB231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFMSUBADD231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFMSUBADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFMSUBADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFMSUBADD231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFMSUBADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMADD231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB132PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB132PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB132PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB132PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB213PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB213PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB213PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB213PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB231PDYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB231PDYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB231PSYm %ymm0, %ymm0, %rsi, 1, _, 0, _
- %ymm0 = VFNMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, _, 0, _
+ ; CHECK: %ymm0 = VFNMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
+ %ymm0 = VFNMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VFNMSUB231PSYr %ymm0, %ymm1, %ymm2
%ymm0 = VFNMSUB231PSZ256r %ymm0, %ymm1, %ymm2
; CHECK: %ymm0 = VPSRADYri %ymm0, 7
%ymm0 = VPSRADZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRADYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRADZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRADYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRADZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRADYrr %ymm0, %xmm1
%ymm0 = VPSRADZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRAVDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRAVDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRAVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRAVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRAVDYrr %ymm0, %ymm1
%ymm0 = VPSRAVDZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VPSRAWYri %ymm0, 7
%ymm0 = VPSRAWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRAWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRAWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRAWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRAWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRAWYrr %ymm0, %xmm1
%ymm0 = VPSRAWZ256rr %ymm0, %xmm1
; CHECK: %ymm0 = VPSRLDQYri %ymm0, %ymm1
%ymm0 = VPSRLDQZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VPSRLDYri %ymm0, 7
%ymm0 = VPSRLDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRLDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRLDYrr %ymm0, %xmm1
%ymm0 = VPSRLDZ256rr %ymm0, %xmm1
; CHECK: %ymm0 = VPSRLQYri %ymm0, 7
%ymm0 = VPSRLQZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRLQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRLQYrr %ymm0, %xmm1
%ymm0 = VPSRLQZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRLVDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRLVDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRLVDYrr %ymm0, %ymm1
%ymm0 = VPSRLVDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSRLVQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRLVQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRLVQYrr %ymm0, %ymm1
%ymm0 = VPSRLVQZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VPSRLWYri %ymm0, 7
%ymm0 = VPSRLWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSRLWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSRLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSRLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSRLWYrr %ymm0, %xmm1
%ymm0 = VPSRLWZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPMOVSXBDYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXBDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXBDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXBDYrr %xmm0
%ymm0 = VPMOVSXBDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXBQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXBQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXBQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXBQYrr %xmm0
%ymm0 = VPMOVSXBQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXBWYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXBWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXBWYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXBWYrr %xmm0
%ymm0 = VPMOVSXBWZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXDQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXDQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXDQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXDQYrr %xmm0
%ymm0 = VPMOVSXDQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXWDYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXWDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXWDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXWDYrr %xmm0
%ymm0 = VPMOVSXWDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXWQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVSXWQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVSXWQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVSXWQYrr %xmm0
%ymm0 = VPMOVSXWQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBDYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXBDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXBDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXBDYrr %xmm0
%ymm0 = VPMOVZXBDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXBQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXBQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXBQYrr %xmm0
%ymm0 = VPMOVZXBQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBWYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXBWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXBWYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXBWYrr %xmm0
%ymm0 = VPMOVZXBWZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXDQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXDQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXDQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXDQYrr %xmm0
%ymm0 = VPMOVZXDQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXWDYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXWDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXWDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXWDYrr %xmm0
%ymm0 = VPMOVZXWDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXWQYrm %rip, 1, _, %rax, _
- %ymm0 = VPMOVZXWQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPMOVZXWQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPMOVZXWQYrr %xmm0
%ymm0 = VPMOVZXWQZ256rr %xmm0
- ; CHECK: %ymm0 = VBROADCASTF128 %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTF128 %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
%ymm0 = VBROADCASTF32X2Z256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
%ymm0 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSSYrm %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTSSYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VBROADCASTSSYrr %xmm0
%ymm0 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTBYrm %rip, 1, _, %rax, _
- %ymm0 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTBYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPBROADCASTBYrr %xmm0
%ymm0 = VPBROADCASTBZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTDYrm %rip, 1, _, %rax, _
- %ymm0 = VPBROADCASTDZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPBROADCASTDYrr %xmm0
%ymm0 = VPBROADCASTDZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTWYrm %rip, 1, _, %rax, _
- %ymm0 = VPBROADCASTWZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTWYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPBROADCASTWYrr %xmm0
%ymm0 = VPBROADCASTWZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTI128 %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, _, %rax, _
- %ymm0 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTI128 %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
%ymm0 = VBROADCASTI32X2Z256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, _, %rax, _
- %ymm0 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
%ymm0 = VPBROADCASTQZ256r %xmm0
- ; CHECK: %ymm0 = VPABSBYrm %rip, 1, _, %rax, _
- %ymm0 = VPABSBZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPABSBYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPABSBYrr %ymm0
%ymm0 = VPABSBZ256rr %ymm0
- ; CHECK: %ymm0 = VPABSDYrm %rip, 1, _, %rax, _
- %ymm0 = VPABSDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPABSDYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPABSDYrr %ymm0
%ymm0 = VPABSDZ256rr %ymm0
- ; CHECK: %ymm0 = VPABSWYrm %rip, 1, _, %rax, _
- %ymm0 = VPABSWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPABSWYrm %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPABSWYrr %ymm0
%ymm0 = VPABSWZ256rr %ymm0
- ; CHECK: %ymm0 = VPSADBWYrm %ymm0, 1, _, %rax, _, _
- %ymm0 = VPSADBWZ256rm %ymm0, 1, _, %rax, _, _
+ ; CHECK: %ymm0 = VPSADBWYrm %ymm0, 1, %noreg, %rax, %noreg, %noreg
+ %ymm0 = VPSADBWZ256rm %ymm0, 1, %noreg, %rax, %noreg, %noreg
; CHECK: %ymm0 = VPSADBWYrr %ymm0, %ymm1
%ymm0 = VPSADBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPERMDYrm %ymm0, %rdi, 1, _, 0, _
- %ymm0 = VPERMDZ256rm %ymm0, %rdi, 1, _, 0, _
+ ; CHECK: %ymm0 = VPERMDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
+ %ymm0 = VPERMDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VPERMDYrr %ymm1, %ymm0
%ymm0 = VPERMDZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMILPDYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPERMILPDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPERMILPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPERMILPDYri %ymm0, 7
%ymm0 = VPERMILPDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMILPDYrm %ymm0, %rdi, 1, _, 0, _
- %ymm0 = VPERMILPDZ256rm %ymm0, %rdi, 1, _, 0, _
+ ; CHECK: %ymm0 = VPERMILPDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
+ %ymm0 = VPERMILPDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VPERMILPDYrr %ymm1, %ymm0
%ymm0 = VPERMILPDZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMILPSYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPERMILPSZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPERMILPSYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPERMILPSYri %ymm0, 7
%ymm0 = VPERMILPSZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMILPSYrm %ymm0, %rdi, 1, _, 0, _
- %ymm0 = VPERMILPSZ256rm %ymm0, %rdi, 1, _, 0, _
+ ; CHECK: %ymm0 = VPERMILPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
+ %ymm0 = VPERMILPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VPERMILPSYrr %ymm1, %ymm0
%ymm0 = VPERMILPSZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMPDYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPERMPDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPERMPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPERMPDYri %ymm0, 7
%ymm0 = VPERMPDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMPSYrm %ymm0, %rdi, 1, _, 0, _
- %ymm0 = VPERMPSZ256rm %ymm0, %rdi, 1, _, 0, _
+ ; CHECK: %ymm0 = VPERMPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
+ %ymm0 = VPERMPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VPERMPSYrr %ymm1, %ymm0
%ymm0 = VPERMPSZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMQYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPERMQZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPERMQYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPERMQYri %ymm0, 7
%ymm0 = VPERMQZ256ri %ymm0, 7
; CHECK: %ymm0 = VPSLLDQYri %ymm0, 14
%ymm0 = VPSLLDQZ256rr %ymm0, 14
; CHECK: %ymm0 = VPSLLDYri %ymm0, 7
%ymm0 = VPSLLDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSLLDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSLLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSLLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSLLDYrr %ymm0, 14
%ymm0 = VPSLLDZ256rr %ymm0, 14
; CHECK: %ymm0 = VPSLLQYri %ymm0, 7
%ymm0 = VPSLLQZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSLLQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSLLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSLLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSLLQYrr %ymm0, 14
%ymm0 = VPSLLQZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLVDYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSLLVDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSLLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSLLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSLLVDYrr %ymm0, 14
%ymm0 = VPSLLVDZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLVQYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSLLVQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSLLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSLLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSLLVQYrr %ymm0, 14
%ymm0 = VPSLLVQZ256rr %ymm0, 14
; CHECK: %ymm0 = VPSLLWYri %ymm0, 7
%ymm0 = VPSLLWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLWYrm %ymm0, %rip, 1, _, %rax, _
- %ymm0 = VPSLLWZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPSLLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
+ %ymm0 = VPSLLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm0 = VPSLLWYrr %ymm0, 14
%ymm0 = VPSLLWZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VCVTDQ2PDYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTDQ2PDZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %ymm0 = VCVTDQ2PDYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTDQ2PDZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTDQ2PDYrr %xmm0
%ymm0 = VCVTDQ2PDZ256rr %xmm0
- ; CHECK: %ymm0 = VCVTDQ2PSYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTDQ2PSZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %ymm0 = VCVTDQ2PSYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTDQ2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTDQ2PSYrr %ymm0
%ymm0 = VCVTDQ2PSZ256rr %ymm0
- ; CHECK: %xmm0 = VCVTPD2DQYrm %rdi, %ymm0, 1, _, 0
- %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0
+ %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2DQYrr %ymm0
%xmm0 = VCVTPD2DQZ256rr %ymm0
- ; CHECK: %xmm0 = VCVTPD2PSYrm %rdi, %ymm0, 1, _, 0
- %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2PSYrm %rdi, %ymm0, 1, %noreg, 0
+ %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2PSYrr %ymm0
%xmm0 = VCVTPD2PSZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTPS2DQYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTPS2DQZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %ymm0 = VCVTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTPS2DQYrr %ymm0
%ymm0 = VCVTPS2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTPS2PDYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTPS2PDZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %ymm0 = VCVTPS2PDYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTPS2PDZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTPS2PDYrr %xmm0
%ymm0 = VCVTPS2PDZ256rr %xmm0
- ; CHECK: VCVTPS2PHYmr %rdi, %ymm0, 1, _, 0, _, _
- VCVTPS2PHZ256mr %rdi, %ymm0, 1, _, 0, _, _
- ; CHECK: %xmm0 = VCVTPS2PHYrr %ymm0, _
- %xmm0 = VCVTPS2PHZ256rr %ymm0, _
- ; CHECK: %ymm0 = VCVTPH2PSYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTPH2PSZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: VCVTPS2PHYmr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg
+ VCVTPS2PHZ256mr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg
+ ; CHECK: %xmm0 = VCVTPS2PHYrr %ymm0, %noreg
+ %xmm0 = VCVTPS2PHZ256rr %ymm0, %noreg
+ ; CHECK: %ymm0 = VCVTPH2PSYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTPH2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTPH2PSYrr %xmm0
%ymm0 = VCVTPH2PSZ256rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQYrm %rdi, %ymm0, 1, _, 0
- %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0
+ %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTTPD2DQYrr %ymm0
%xmm0 = VCVTTPD2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTTPS2DQYrm %rdi, %ymm0, 1, _, 0
- %ymm0 = VCVTTPS2DQZ256rm %rdi, %ymm0, 1, _, 0
+ ; CHECK: %ymm0 = VCVTTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0
+ %ymm0 = VCVTTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
; CHECK: %ymm0 = VCVTTPS2DQYrr %ymm0
%ymm0 = VCVTTPS2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VSQRTPDYm %rdi, _, _, _, _
- %ymm0 = VSQRTPDZ256m %rdi, _, _, _, _
+ ; CHECK: %ymm0 = VSQRTPDYm %rdi, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm0 = VSQRTPDYr %ymm0
%ymm0 = VSQRTPDZ256r %ymm0
- ; CHECK: %ymm0 = VSQRTPSYm %rdi, _, _, _, _
- %ymm0 = VSQRTPSZ256m %rdi, _, _, _, _
+ ; CHECK: %ymm0 = VSQRTPSYm %rdi, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm0 = VSQRTPSYr %ymm0
%ymm0 = VSQRTPSZ256r %ymm0
- ; CHECK: %ymm0 = VPALIGNRYrmi %ymm0, %rdi, _, _, _, _, _
- %ymm0 = VPALIGNRZ256rmi %ymm0, %rdi, _, _, _, _, _
- ; CHECK: %ymm0 = VPALIGNRYrri %ymm0, %ymm1, _
- %ymm0 = VPALIGNRZ256rri %ymm0, %ymm1, _
- ; CHECK: %ymm0 = VMOVUPSYrm %rdi, 1, _, 0, _
- %ymm0 = VMOVUPSZ256rm %rdi, 1, _, 0, _
+ ; CHECK: %ymm0 = VPALIGNRYrmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VPALIGNRZ256rmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm0 = VPALIGNRYrri %ymm0, %ymm1, %noreg
+ %ymm0 = VPALIGNRZ256rri %ymm0, %ymm1, %noreg
+ ; CHECK: %ymm0 = VMOVUPSYrm %rdi, 1, %noreg, 0, %noreg
+ %ymm0 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm0 = VMOVUPSYrr %ymm0
%ymm0 = VMOVUPSZ256rr %ymm0
; CHECK: %ymm0 = VMOVUPSYrr_REV %ymm0
%ymm0 = VMOVUPSZ256rr_REV %ymm0
- ; CHECK: %ymm0 = VPSHUFBYrm %ymm0, _, _, _, _, _
- %ymm0 = VPSHUFBZ256rm %ymm0, _, _, _, _, _
+ ; CHECK: %ymm0 = VPSHUFBYrm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VPSHUFBZ256rm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm0 = VPSHUFBYrr %ymm0, %ymm1
%ymm0 = VPSHUFBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSHUFDYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPSHUFDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPSHUFDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPSHUFDYri %ymm0, -24
%ymm0 = VPSHUFDZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VPSHUFHWYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPSHUFHWZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPSHUFHWYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPSHUFHWYri %ymm0, -24
%ymm0 = VPSHUFHWZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VPSHUFLWYmi %rdi, 1, _, 0, _, _
- %ymm0 = VPSHUFLWZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm0 = VPSHUFLWYmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm0 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm0 = VPSHUFLWYri %ymm0, -24
%ymm0 = VPSHUFLWZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VSHUFPDYrmi %ymm0, _, _, _, _, _, _
- %ymm0 = VSHUFPDZ256rmi %ymm0, _, _, _, _, _, _
- ; CHECK: %ymm0 = VSHUFPDYrri %ymm0, _, _
- %ymm0 = VSHUFPDZ256rri %ymm0, _, _
- ; CHECK: %ymm0 = VSHUFPSYrmi %ymm0, _, _, _, _, _, _
- %ymm0 = VSHUFPSZ256rmi %ymm0, _, _, _, _, _, _
- ; CHECK: %ymm0 = VSHUFPSYrri %ymm0, _, _
- %ymm0 = VSHUFPSZ256rri %ymm0, _, _
+ ; CHECK: %ymm0 = VSHUFPDYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VSHUFPDZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm0 = VSHUFPDYrri %ymm0, %noreg, %noreg
+ %ymm0 = VSHUFPDZ256rri %ymm0, %noreg, %noreg
+ ; CHECK: %ymm0 = VSHUFPSYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm0 = VSHUFPSZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm0 = VSHUFPSYrri %ymm0, %noreg, %noreg
+ %ymm0 = VSHUFPSZ256rri %ymm0, %noreg, %noreg
RET 0, %zmm0, %zmm1
...
@@ -899,80 +899,80 @@ body: |
name: evex_z128_to_vex_test
body: |
bb.0:
- ; CHECK: VMOVAPDmr %rdi, 1, _, 0, _, %xmm0
- VMOVAPDZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVAPDrm %rip, 1, _, %rax, _
- %xmm0 = VMOVAPDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVAPDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVAPDrr %xmm0
%xmm0 = VMOVAPDZ128rr %xmm0
- ; CHECK: VMOVAPSmr %rdi, 1, _, 0, _, %xmm0
- VMOVAPSZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVAPSrm %rip, 1, _, %rax, _
- %xmm0 = VMOVAPSZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVAPSrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVAPSrr %xmm0
%xmm0 = VMOVAPSZ128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQA32Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQA32Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQArr %xmm0
%xmm0 = VMOVDQA32Z128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQA64Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQA64Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQArr %xmm0
%xmm0 = VMOVDQA64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQU16Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQU16Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQUrr %xmm0
%xmm0 = VMOVDQU16Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQU32Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQU32Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQUrr %xmm0
%xmm0 = VMOVDQU32Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQU64Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQU64Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQUrr %xmm0
%xmm0 = VMOVDQU64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, _, 0, _, %xmm0
- VMOVDQU8Z128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, _, %rax, _
- %xmm0 = VMOVDQU8Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVDQUrr %xmm0
%xmm0 = VMOVDQU8Z128rr %xmm0
; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
%xmm0 = VMOVDQU8Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVNTDQArm %rip, 1, _, %rax, _
- %xmm0 = VMOVNTDQAZ128rm %rip, 1, _, %rax, _
- ; CHECK: VMOVUPDmr %rdi, 1, _, 0, _, %xmm0
- VMOVUPDZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVUPDrm %rip, 1, _, %rax, _
- %xmm0 = VMOVUPDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMOVNTDQArm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: VMOVUPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVUPDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVUPDrr %xmm0
%xmm0 = VMOVUPDZ128rr %xmm0
; CHECK: %xmm0 = VMOVUPDrr_REV %xmm0
%xmm0 = VMOVUPDZ128rr_REV %xmm0
- ; CHECK: VMOVUPSmr %rdi, 1, _, 0, _, %xmm0
- VMOVUPSZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVUPSrm %rip, 1, _, %rax, _
- %xmm0 = VMOVUPSZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVUPSrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMOVUPSrr %xmm0
%xmm0 = VMOVUPSZ128rr %xmm0
; CHECK: %xmm0 = VMOVUPSrr_REV %xmm0
%xmm0 = VMOVUPSZ128rr_REV %xmm0
- ; CHECK: VMOVNTDQmr %rdi, 1, _, 0, _, %xmm0
- VMOVNTDQZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: VMOVNTPDmr %rdi, 1, _, 0, _, %xmm0
- VMOVNTPDZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: VMOVNTPSmr %rdi, 1, _, 0, _, %xmm0
- VMOVNTPSZ128mr %rdi, 1, _, 0, _, %xmm0
+ ; CHECK: VMOVNTDQmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: VMOVNTPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: VMOVNTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
; CHECK: %xmm0 = VMOVAPDrr_REV %xmm0
%xmm0 = VMOVAPDZ128rr_REV %xmm0
; CHECK: %xmm0 = VMOVAPSrr_REV %xmm0
@@ -987,776 +987,776 @@ body: |
%xmm0 = VMOVDQU32Z128rr_REV %xmm0
; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
%xmm0 = VMOVDQU64Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VPMOVSXBDrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXBDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXBDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXBDrr %xmm0
%xmm0 = VPMOVSXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXBQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXBQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXBQrr %xmm0
%xmm0 = VPMOVSXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBWrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXBWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXBWrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXBWrr %xmm0
%xmm0 = VPMOVSXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXDQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXDQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXDQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXDQrr %xmm0
%xmm0 = VPMOVSXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWDrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXWDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXWDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXWDrr %xmm0
%xmm0 = VPMOVSXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVSXWQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVSXWQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVSXWQrr %xmm0
%xmm0 = VPMOVSXWQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBDrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXBDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXBDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXBDrr %xmm0
%xmm0 = VPMOVZXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXBQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXBQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXBQrr %xmm0
%xmm0 = VPMOVZXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBWrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXBWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXBWrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXBWrr %xmm0
%xmm0 = VPMOVZXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXDQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXDQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXDQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXDQrr %xmm0
%xmm0 = VPMOVZXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWDrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXWDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXWDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXWDrr %xmm0
%xmm0 = VPMOVZXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWQrm %rip, 1, _, %rax, _
- %xmm0 = VPMOVZXWQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMOVZXWQrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMOVZXWQrr %xmm0
%xmm0 = VPMOVZXWQZ128rr %xmm0
- ; CHECK: VMOVHPDmr %rdi, 1, _, 0, _, %xmm0
- VMOVHPDZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVHPDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VMOVHPDZ128rm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: VMOVHPSmr %rdi, 1, _, 0, _, %xmm0
- VMOVHPSZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVHPSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VMOVHPSZ128rm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: VMOVLPDmr %rdi, 1, _, 0, _, %xmm0
- VMOVLPDZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVLPDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VMOVLPDZ128rm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: VMOVLPSmr %rdi, 1, _, 0, _, %xmm0
- VMOVLPSZ128mr %rdi, 1, _, 0, _, %xmm0
- ; CHECK: %xmm0 = VMOVLPSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VMOVLPSZ128rm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXCPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: VMOVHPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVHPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVHPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVHPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVHPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVHPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVLPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVLPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVLPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVLPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
+ ; CHECK: %xmm0 = VMOVLPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVLPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
%xmm0 = VMAXCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXCPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
%xmm0 = VMAXCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
%xmm0 = VMAXPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
%xmm0 = VMAXPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINCPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
%xmm0 = VMINCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINCPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
%xmm0 = VMINCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
%xmm0 = VMINPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
%xmm0 = VMINPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMULPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMULPDrr %xmm0, %xmm1
%xmm0 = VMULPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMULPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMULPSrr %xmm0, %xmm1
%xmm0 = VMULPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VORPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VORPDrr %xmm0, %xmm1
%xmm0 = VORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VORPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VORPSrr %xmm0, %xmm1
%xmm0 = VORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDBrr %xmm0, %xmm1
%xmm0 = VPADDBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDDrr %xmm0, %xmm1
%xmm0 = VPADDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDQrr %xmm0, %xmm1
%xmm0 = VPADDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDSBrr %xmm0, %xmm1
%xmm0 = VPADDSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDSWrr %xmm0, %xmm1
%xmm0 = VPADDSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDUSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDUSBrr %xmm0, %xmm1
%xmm0 = VPADDUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDUSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDUSWrr %xmm0, %xmm1
%xmm0 = VPADDUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPADDWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPADDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPADDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPADDWrr %xmm0, %xmm1
%xmm0 = VPADDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPANDDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPANDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
%xmm0 = VPANDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPANDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPANDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
%xmm0 = VPANDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPANDNDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPANDNDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
%xmm0 = VPANDNDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPANDNQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPANDNQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
%xmm0 = VPANDNQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPAVGBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPAVGBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPAVGBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPAVGBrr %xmm0, %xmm1
%xmm0 = VPAVGBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPAVGWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPAVGWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPAVGWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPAVGWrr %xmm0, %xmm1
%xmm0 = VPAVGWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXSBrr %xmm0, %xmm1
%xmm0 = VPMAXSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXSDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXSDrr %xmm0, %xmm1
%xmm0 = VPMAXSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXSWrr %xmm0, %xmm1
%xmm0 = VPMAXSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXUBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXUBrr %xmm0, %xmm1
%xmm0 = VPMAXUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXUDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXUDrr %xmm0, %xmm1
%xmm0 = VPMAXUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMAXUWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMAXUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMAXUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMAXUWrr %xmm0, %xmm1
%xmm0 = VPMAXUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINSBrr %xmm0, %xmm1
%xmm0 = VPMINSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINSDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINSDrr %xmm0, %xmm1
%xmm0 = VPMINSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINSWrr %xmm0, %xmm1
%xmm0 = VPMINSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINUBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINUBrr %xmm0, %xmm1
%xmm0 = VPMINUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINUDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINUDrr %xmm0, %xmm1
%xmm0 = VPMINUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMINUWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMINUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMINUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMINUWrr %xmm0, %xmm1
%xmm0 = VPMINUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULDQrr %xmm0, %xmm1
%xmm0 = VPMULDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHRSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULHRSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULHRSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULHRSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULHRSWrr %xmm0, %xmm1
%xmm0 = VPMULHRSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHUWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULHUWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULHUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULHUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULHUWrr %xmm0, %xmm1
%xmm0 = VPMULHUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULHWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULHWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULHWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULHWrr %xmm0, %xmm1
%xmm0 = VPMULHWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULLDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULLDrr %xmm0, %xmm1
%xmm0 = VPMULLDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULLWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULLWrr %xmm0, %xmm1
%xmm0 = VPMULLWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULUDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMULUDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMULUDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMULUDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMULUDQrr %xmm0, %xmm1
%xmm0 = VPMULUDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPORDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
%xmm0 = VPORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPORQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
%xmm0 = VPORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBBrr %xmm0, %xmm1
%xmm0 = VPSUBBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBDrr %xmm0, %xmm1
%xmm0 = VPSUBDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBQrr %xmm0, %xmm1
%xmm0 = VPSUBQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBSBrr %xmm0, %xmm1
%xmm0 = VPSUBSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBSWrr %xmm0, %xmm1
%xmm0 = VPSUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBUSBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBUSBrr %xmm0, %xmm1
%xmm0 = VPSUBUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBUSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBUSWrr %xmm0, %xmm1
%xmm0 = VPSUBUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSUBWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSUBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSUBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSUBWrr %xmm0, %xmm1
%xmm0 = VPSUBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VADDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VADDPDrr %xmm0, %xmm1
%xmm0 = VADDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VADDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VADDPSrr %xmm0, %xmm1
%xmm0 = VADDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VANDNPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VANDNPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VANDNPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VANDNPDrr %xmm0, %xmm1
%xmm0 = VANDNPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VANDNPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VANDNPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VANDNPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VANDNPSrr %xmm0, %xmm1
%xmm0 = VANDNPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VANDPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VANDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VANDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VANDPDrr %xmm0, %xmm1
%xmm0 = VANDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VANDPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VANDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VANDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VANDPSrr %xmm0, %xmm1
%xmm0 = VANDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VDIVPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VDIVPDrr %xmm0, %xmm1
%xmm0 = VDIVPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VDIVPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VDIVPSrr %xmm0, %xmm1
%xmm0 = VDIVPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPXORDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPXORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
%xmm0 = VPXORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPXORQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPXORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
%xmm0 = VPXORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VSUBPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VSUBPDrr %xmm0, %xmm1
%xmm0 = VSUBPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VSUBPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VSUBPSrr %xmm0, %xmm1
%xmm0 = VSUBPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VXORPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VXORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VXORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VXORPDrr %xmm0, %xmm1
%xmm0 = VXORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VXORPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VXORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VXORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VXORPSrr %xmm0, %xmm1
%xmm0 = VXORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDUBSWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMADDUBSWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMADDUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMADDUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMADDUBSWrr %xmm0, %xmm1
%xmm0 = VPMADDUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDWDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPMADDWDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPMADDWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPMADDWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPMADDWDrr %xmm0, %xmm1
%xmm0 = VPMADDWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSDWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPACKSSDWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPACKSSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPACKSSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPACKSSDWrr %xmm0, %xmm1
%xmm0 = VPACKSSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSWBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPACKSSWBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPACKSSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPACKSSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPACKSSWBrr %xmm0, %xmm1
%xmm0 = VPACKSSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSDWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPACKUSDWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPACKUSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPACKUSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPACKUSDWrr %xmm0, %xmm1
%xmm0 = VPACKUSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSWBrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPACKUSWBZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPACKUSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPACKUSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPACKUSWBrr %xmm0, %xmm1
%xmm0 = VPACKUSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHBWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKHBWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKHBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKHBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKHBWrr %xmm0, %xmm1
%xmm0 = VPUNPCKHBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKHDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKHDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKHDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKHDQrr %xmm0, %xmm1
%xmm0 = VPUNPCKHDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHQDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKHQDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKHQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKHQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKHQDQrr %xmm0, %xmm1
%xmm0 = VPUNPCKHQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHWDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKHWDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKHWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKHWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKHWDrr %xmm0, %xmm1
%xmm0 = VPUNPCKHWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLBWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKLBWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKLBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKLBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKLBWrr %xmm0, %xmm1
%xmm0 = VPUNPCKLBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKLDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKLDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKLDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKLDQrr %xmm0, %xmm1
%xmm0 = VPUNPCKLDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLQDQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKLQDQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKLQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKLQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKLQDQrr %xmm0, %xmm1
%xmm0 = VPUNPCKLQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLWDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPUNPCKLWDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPUNPCKLWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPUNPCKLWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPUNPCKLWDrr %xmm0, %xmm1
%xmm0 = VPUNPCKLWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VUNPCKHPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VUNPCKHPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VUNPCKHPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VUNPCKHPDrr %xmm0, %xmm1
%xmm0 = VUNPCKHPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VUNPCKHPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VUNPCKHPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VUNPCKHPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VUNPCKHPSrr %xmm0, %xmm1
%xmm0 = VUNPCKHPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VUNPCKLPDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VUNPCKLPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VUNPCKLPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VUNPCKLPDrr %xmm0, %xmm1
%xmm0 = VUNPCKLPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VUNPCKLPSZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VUNPCKLPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VUNPCKLPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VUNPCKLPSrr %xmm0, %xmm1
%xmm0 = VUNPCKLPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADDSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADDSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADDSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADDSUB231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADDSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUBADD231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUBADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUBADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUBADD231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUBADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB132PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB132PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB213PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB213PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231PDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB231PDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231PSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB231PSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231PSZ128r %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VPSLLDri %xmm0, 7
%xmm0 = VPSLLDZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSLLDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSLLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSLLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSLLDrr %xmm0, 14
%xmm0 = VPSLLDZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSLLQri %xmm0, 7
%xmm0 = VPSLLQZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSLLQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSLLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSLLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSLLQrr %xmm0, 14
%xmm0 = VPSLLQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLVDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSLLVDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSLLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSLLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSLLVDrr %xmm0, 14
%xmm0 = VPSLLVDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLVQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSLLVQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSLLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSLLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSLLVQrr %xmm0, 14
%xmm0 = VPSLLVQZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSLLWri %xmm0, 7
%xmm0 = VPSLLWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSLLWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSLLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSLLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSLLWrr %xmm0, 14
%xmm0 = VPSLLWZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRADri %xmm0, 7
%xmm0 = VPSRADZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRADrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRADZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRADrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRADZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRADrr %xmm0, 14
%xmm0 = VPSRADZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRAVDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRAVDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRAVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRAVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRAVDrr %xmm0, 14
%xmm0 = VPSRAVDZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRAWri %xmm0, 7
%xmm0 = VPSRAWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRAWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRAWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRAWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRAWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRAWrr %xmm0, 14
%xmm0 = VPSRAWZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRLDQri %xmm0, 14
%xmm0 = VPSRLDQZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRLDri %xmm0, 7
%xmm0 = VPSRLDZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRLDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRLDrr %xmm0, 14
%xmm0 = VPSRLDZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRLQri %xmm0, 7
%xmm0 = VPSRLQZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRLQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRLQrr %xmm0, 14
%xmm0 = VPSRLQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLVDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRLVDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRLVDrr %xmm0, 14
%xmm0 = VPSRLVDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLVQrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRLVQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRLVQrr %xmm0, 14
%xmm0 = VPSRLVQZ128rr %xmm0, 14
; CHECK: %xmm0 = VPSRLWri %xmm0, 7
%xmm0 = VPSRLWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLWrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VPSRLWZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPSRLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPSRLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPSRLWrr %xmm0, 14
%xmm0 = VPSRLWZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPERMILPDmi %rdi, 1, _, 0, _, _
- %xmm0 = VPERMILPDZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm0 = VPERMILPDmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm0 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm0 = VPERMILPDri %xmm0, 9
%xmm0 = VPERMILPDZ128ri %xmm0, 9
- ; CHECK: %xmm0 = VPERMILPDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VPERMILPDZ128rm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VPERMILPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VPERMILPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VPERMILPDrr %xmm0, %xmm1
%xmm0 = VPERMILPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPERMILPSmi %rdi, 1, _, 0, _, _
- %xmm0 = VPERMILPSZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm0 = VPERMILPSmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm0 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm0 = VPERMILPSri %xmm0, 9
%xmm0 = VPERMILPSZ128ri %xmm0, 9
- ; CHECK: %xmm0 = VPERMILPSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VPERMILPSZ128rm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VPERMILPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VPERMILPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VPERMILPSrr %xmm0, %xmm1
%xmm0 = VPERMILPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VCVTPH2PSrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTPH2PSZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPH2PSrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTPH2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPH2PSrr %xmm0
%xmm0 = VCVTPH2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTDQ2PDrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTDQ2PDZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTDQ2PDrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTDQ2PDZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTDQ2PDrr %xmm0
%xmm0 = VCVTDQ2PDZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTDQ2PSrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTDQ2PSZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTDQ2PSrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTDQ2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTDQ2PSrr %xmm0
%xmm0 = VCVTDQ2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPD2DQrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTPD2DQZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2DQrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2DQrr %xmm0
%xmm0 = VCVTPD2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPD2PSrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTPD2PSZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2PSrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTPD2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2PSrr %xmm0
%xmm0 = VCVTPD2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPS2DQrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTPS2DQZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPS2DQrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPS2DQrr %xmm0
%xmm0 = VCVTPS2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPS2PDrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTPS2PDZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPS2PDrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTPS2PDZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPS2PDrr %xmm0
%xmm0 = VCVTPS2PDZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTTPD2DQZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTTPD2DQrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTTPD2DQrr %xmm0
%xmm0 = VCVTTPD2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTTPS2DQrm %rdi, %xmm0, 1, _, 0
- %xmm0 = VCVTTPS2DQZ128rm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTTPS2DQrm %rdi, %xmm0, 1, %noreg, 0
+ %xmm0 = VCVTTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %xmm0 = VCVTTPS2DQrr %xmm0
%xmm0 = VCVTTPS2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VSQRTPDm %rdi, _, _, _, _
- %xmm0 = VSQRTPDZ128m %rdi, _, _, _, _
+ ; CHECK: %xmm0 = VSQRTPDm %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VSQRTPDr %xmm0
%xmm0 = VSQRTPDZ128r %xmm0
- ; CHECK: %xmm0 = VSQRTPSm %rdi, _, _, _, _
- %xmm0 = VSQRTPSZ128m %rdi, _, _, _, _
+ ; CHECK: %xmm0 = VSQRTPSm %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VSQRTPSr %xmm0
%xmm0 = VSQRTPSZ128r %xmm0
- ; CHECK: %xmm0 = VMOVDDUPrm %rdi, 1, _, 0, _
- %xmm0 = VMOVDDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VMOVDDUPrm %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VMOVDDUPrr %xmm0
%xmm0 = VMOVDDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVSHDUPrm %rdi, 1, _, 0, _
- %xmm0 = VMOVSHDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VMOVSHDUPrm %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VMOVSHDUPrr %xmm0
%xmm0 = VMOVSHDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVSLDUPrm %rdi, 1, _, 0, _
- %xmm0 = VMOVSLDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VMOVSLDUPrm %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VMOVSLDUPrr %xmm0
%xmm0 = VMOVSLDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VPSHUFBrm %xmm0, _, _, _, _, _
- %xmm0 = VPSHUFBZ128rm %xmm0, _, _, _, _, _
+ ; CHECK: %xmm0 = VPSHUFBrm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPSHUFBZ128rm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPSHUFBrr %xmm0, %xmm1
%xmm0 = VPSHUFBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSHUFDmi %rdi, 1, _, 0, _, _
- %xmm0 = VPSHUFDZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm0 = VPSHUFDmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm0 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm0 = VPSHUFDri %xmm0, -24
%xmm0 = VPSHUFDZ128ri %xmm0, -24
- ; CHECK: %xmm0 = VPSHUFHWmi %rdi, 1, _, 0, _, _
- %xmm0 = VPSHUFHWZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm0 = VPSHUFHWmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm0 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm0 = VPSHUFHWri %xmm0, -24
%xmm0 = VPSHUFHWZ128ri %xmm0, -24
- ; CHECK: %xmm0 = VPSHUFLWmi %rdi, 1, _, 0, _, _
- %xmm0 = VPSHUFLWZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm0 = VPSHUFLWmi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm0 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm0 = VPSHUFLWri %xmm0, -24
%xmm0 = VPSHUFLWZ128ri %xmm0, -24
; CHECK: %xmm0 = VPSLLDQri %xmm0, %xmm1
%xmm0 = VPSLLDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSHUFPDrmi %xmm0, _, _, _, _, _, _
- %xmm0 = VSHUFPDZ128rmi %xmm0, _, _, _, _, _, _
- ; CHECK: %xmm0 = VSHUFPDrri %xmm0, _, _
- %xmm0 = VSHUFPDZ128rri %xmm0, _, _
- ; CHECK: %xmm0 = VSHUFPSrmi %xmm0, _, _, _, _, _, _
- %xmm0 = VSHUFPSZ128rmi %xmm0, _, _, _, _, _, _
- ; CHECK: %xmm0 = VSHUFPSrri %xmm0, _, _
- %xmm0 = VSHUFPSZ128rri %xmm0, _, _
- ; CHECK: %xmm0 = VPSADBWrm %xmm0, 1, _, %rax, _, _
- %xmm0 = VPSADBWZ128rm %xmm0, 1, _, %rax, _, _
+ ; CHECK: %xmm0 = VSHUFPDrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSHUFPDZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSHUFPDrri %xmm0, %noreg, %noreg
+ %xmm0 = VSHUFPDZ128rri %xmm0, %noreg, %noreg
+ ; CHECK: %xmm0 = VSHUFPSrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSHUFPSZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSHUFPSrri %xmm0, %noreg, %noreg
+ %xmm0 = VSHUFPSZ128rri %xmm0, %noreg, %noreg
+ ; CHECK: %xmm0 = VPSADBWrm %xmm0, 1, %noreg, %rax, %noreg, %noreg
+ %xmm0 = VPSADBWZ128rm %xmm0, 1, %noreg, %rax, %noreg, %noreg
; CHECK: %xmm0 = VPSADBWrr %xmm0, %xmm1
%xmm0 = VPSADBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VBROADCASTSSrm %rip, _, _, _, _
- %xmm0 = VBROADCASTSSZ128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VBROADCASTSSrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VBROADCASTSSrr %xmm0
%xmm0 = VBROADCASTSSZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTBrm %rip, _, _, _, _
- %xmm0 = VPBROADCASTBZ128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTBrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPBROADCASTBrr %xmm0
%xmm0 = VPBROADCASTBZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTDrm %rip, _, _, _, _
- %xmm0 = VPBROADCASTDZ128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTDrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPBROADCASTDrr %xmm0
%xmm0 = VPBROADCASTDZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTQrm %rip, _, _, _, _
- %xmm0 = VPBROADCASTQZ128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPBROADCASTQrr %xmm0
%xmm0 = VPBROADCASTQZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTWrm %rip, _, _, _, _
- %xmm0 = VPBROADCASTWZ128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTWrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPBROADCASTWrr %xmm0
%xmm0 = VPBROADCASTWZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTQrm %rip, _, _, _, _
- %xmm0 = VBROADCASTI32X2Z128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPBROADCASTQrr %xmm0
%xmm0 = VBROADCASTI32X2Z128r %xmm0
; CHECK: %xmm0 = VCVTPS2PHrr %xmm0, 2
%xmm0 = VCVTPS2PHZ128rr %xmm0, 2
- ; CHECK: VCVTPS2PHmr %rdi, %xmm0, 1, _, 0, _, _
- VCVTPS2PHZ128mr %rdi, %xmm0, 1, _, 0, _, _
- ; CHECK: %xmm0 = VPABSBrm %rip, 1, _, %rax, _
- %xmm0 = VPABSBZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VCVTPS2PHmr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg
+ VCVTPS2PHZ128mr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg
+ ; CHECK: %xmm0 = VPABSBrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPABSBrr %xmm0
%xmm0 = VPABSBZ128rr %xmm0
- ; CHECK: %xmm0 = VPABSDrm %rip, 1, _, %rax, _
- %xmm0 = VPABSDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPABSDrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPABSDrr %xmm0
%xmm0 = VPABSDZ128rr %xmm0
- ; CHECK: %xmm0 = VPABSWrm %rip, 1, _, %rax, _
- %xmm0 = VPABSWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPABSWrm %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VPABSWrr %xmm0
%xmm0 = VPABSWZ128rr %xmm0
- ; CHECK: %xmm0 = VPALIGNRrmi %xmm0, _, _, _, _, _, _
- %xmm0 = VPALIGNRZ128rmi %xmm0, _, _, _, _, _, _
+ ; CHECK: %xmm0 = VPALIGNRrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VPALIGNRZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VPALIGNRrri %xmm0, %xmm1, 15
%xmm0 = VPALIGNRZ128rri %xmm0, %xmm1, 15
@@ -1770,552 +1770,552 @@ name: evex_scalar_to_vex_test
body: |
bb.0:
- ; CHECK: %xmm0 = VADDSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VADDSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VADDSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VADDSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VADDSDrr %xmm0, %xmm1
%xmm0 = VADDSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VADDSDrr_Int %xmm0, %xmm1
%xmm0 = VADDSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VADDSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VADDSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VADDSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VADDSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VADDSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VADDSSrr %xmm0, %xmm1
%xmm0 = VADDSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VADDSSrr_Int %xmm0, %xmm1
%xmm0 = VADDSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VDIVSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VDIVSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VDIVSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VDIVSDrr %xmm0, %xmm1
%xmm0 = VDIVSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VDIVSDrr_Int %xmm0, %xmm1
%xmm0 = VDIVSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VDIVSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VDIVSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VDIVSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VDIVSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VDIVSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VDIVSSrr %xmm0, %xmm1
%xmm0 = VDIVSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VDIVSSrr_Int %xmm0, %xmm1
%xmm0 = VDIVSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXCSDZrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
%xmm0 = VMAXCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXCSSZrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
%xmm0 = VMAXCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMAXSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
%xmm0 = VMAXSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMAXSDrr_Int %xmm0, %xmm1
%xmm0 = VMAXSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMAXSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMAXSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMAXSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
%xmm0 = VMAXSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMAXSSrr_Int %xmm0, %xmm1
%xmm0 = VMAXSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINCSDZrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
%xmm0 = VMINCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINCSSZrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
%xmm0 = VMINCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMINSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
%xmm0 = VMINSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMINSDrr_Int %xmm0, %xmm1
%xmm0 = VMINSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMINSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMINSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMINSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
%xmm0 = VMINSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMINSSrr_Int %xmm0, %xmm1
%xmm0 = VMINSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMULSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMULSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMULSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMULSDrr %xmm0, %xmm1
%xmm0 = VMULSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMULSDrr_Int %xmm0, %xmm1
%xmm0 = VMULSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMULSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VMULSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMULSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VMULSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VMULSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VMULSSrr %xmm0, %xmm1
%xmm0 = VMULSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMULSSrr_Int %xmm0, %xmm1
%xmm0 = VMULSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSDrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBSDZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VSUBSDrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBSDZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VSUBSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VSUBSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VSUBSDrr %xmm0, %xmm1
%xmm0 = VSUBSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VSUBSDrr_Int %xmm0, %xmm1
%xmm0 = VSUBSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSSrm %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBSSZrm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VSUBSSrm_Int %xmm0, %rip, 1, _, %rax, _
- %xmm0 = VSUBSSZrm_Int %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VSUBSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm0 = VSUBSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
+ %xmm0 = VSUBSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm0 = VSUBSSrr %xmm0, %xmm1
%xmm0 = VSUBSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VSUBSSrr_Int %xmm0, %xmm1
%xmm0 = VSUBSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD132SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD132SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD132SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD132SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD213SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD213SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD213SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD213SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD231SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD231SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMADD231SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMADD231SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB132SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB132SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB132SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB132SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB213SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB213SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB213SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB213SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB231SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB231SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFMSUB231SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFMSUB231SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD132SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD132SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD132SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD132SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD213SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD213SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD213SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD213SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD231SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD231SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMADD231SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMADD231SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB132SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB132SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB132SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB132SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB213SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB213SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB213SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB213SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SDm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231SDZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB231SDr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231SDZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB231SDr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SSm %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231SSZm %xmm0, %xmm0, %rsi, 1, _, 0, _
- ; CHECK: %xmm0 = VFNMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
- %xmm0 = VFNMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, _, 0, _
+ ; CHECK: %xmm0 = VFNMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VFNMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
+ %xmm0 = VFNMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm0 = VFNMSUB231SSr %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231SSZr %xmm0, %xmm1, %xmm2
; CHECK: %xmm0 = VFNMSUB231SSr_Int %xmm0, %xmm1, %xmm2
%xmm0 = VFNMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: VPEXTRBmr %rdi, 1, _, 0, _, %xmm0, 3
- VPEXTRBZmr %rdi, 1, _, 0, _, %xmm0, 3
+ ; CHECK: VPEXTRBmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
+ VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
; CHECK: %eax = VPEXTRBrr %xmm0, 1
%eax = VPEXTRBZrr %xmm0, 1
- ; CHECK: VPEXTRDmr %rdi, 1, _, 0, _, %xmm0, 3
- VPEXTRDZmr %rdi, 1, _, 0, _, %xmm0, 3
+ ; CHECK: VPEXTRDmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
+ VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
; CHECK: %eax = VPEXTRDrr %xmm0, 1
%eax = VPEXTRDZrr %xmm0, 1
- ; CHECK: VPEXTRQmr %rdi, 1, _, 0, _, %xmm0, 3
- VPEXTRQZmr %rdi, 1, _, 0, _, %xmm0, 3
+ ; CHECK: VPEXTRQmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
+ VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
; CHECK: %rax = VPEXTRQrr %xmm0, 1
%rax = VPEXTRQZrr %xmm0, 1
- ; CHECK: VPEXTRWmr %rdi, 1, _, 0, _, %xmm0, 3
- VPEXTRWZmr %rdi, 1, _, 0, _, %xmm0, 3
+ ; CHECK: VPEXTRWmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
+ VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
; CHECK: %eax = VPEXTRWri %xmm0, 1
%eax = VPEXTRWZrr %xmm0, 1
; CHECK: %eax = VPEXTRWrr_REV %xmm0, 1
%eax = VPEXTRWZrr_REV %xmm0, 1
- ; CHECK: %xmm0 = VPINSRBrm %xmm0, %rsi, 1, _, 0, _, 3
- %xmm0 = VPINSRBZrm %xmm0, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm0 = VPINSRBrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm0 = VPINSRBZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm0 = VPINSRBrr %xmm0, %edi, 5
%xmm0 = VPINSRBZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRDrm %xmm0, %rsi, 1, _, 0, _, 3
- %xmm0 = VPINSRDZrm %xmm0, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm0 = VPINSRDrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm0 = VPINSRDZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm0 = VPINSRDrr %xmm0, %edi, 5
%xmm0 = VPINSRDZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRQrm %xmm0, %rsi, 1, _, 0, _, 3
- %xmm0 = VPINSRQZrm %xmm0, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm0 = VPINSRQrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm0 = VPINSRQZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm0 = VPINSRQrr %xmm0, %rdi, 5
%xmm0 = VPINSRQZrr %xmm0, %rdi, 5
- ; CHECK: %xmm0 = VPINSRWrmi %xmm0, %rsi, 1, _, 0, _, 3
- %xmm0 = VPINSRWZrm %xmm0, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm0 = VPINSRWrmi %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm0 = VPINSRWZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm0 = VPINSRWrri %xmm0, %edi, 5
%xmm0 = VPINSRWZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VSQRTSDm %xmm0, _, _, _, _, _
- %xmm0 = VSQRTSDZm %xmm0, _, _, _, _, _
- ; CHECK: %xmm0 = VSQRTSDm_Int %xmm0, _, _, _, _, _
- %xmm0 = VSQRTSDZm_Int %xmm0, _, _, _, _, _
- ; CHECK: %xmm0 = VSQRTSDr %xmm0, _
- %xmm0 = VSQRTSDZr %xmm0, _
- ; CHECK: %xmm0 = VSQRTSDr_Int %xmm0, _
- %xmm0 = VSQRTSDZr_Int %xmm0, _
- ; CHECK: %xmm0 = VSQRTSSm %xmm0, _, _, _, _, _
- %xmm0 = VSQRTSSZm %xmm0, _, _, _, _, _
- ; CHECK: %xmm0 = VSQRTSSm_Int %xmm0, _, _, _, _, _
- %xmm0 = VSQRTSSZm_Int %xmm0, _, _, _, _, _
- ; CHECK: %xmm0 = VSQRTSSr %xmm0, _
- %xmm0 = VSQRTSSZr %xmm0, _
- ; CHECK: %xmm0 = VSQRTSSr_Int %xmm0, _
- %xmm0 = VSQRTSSZr_Int %xmm0, _
+ ; CHECK: %xmm0 = VSQRTSDm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTSDZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSQRTSDm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTSDZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSQRTSDr %xmm0, %noreg
+ %xmm0 = VSQRTSDZr %xmm0, %noreg
+ ; CHECK: %xmm0 = VSQRTSDr_Int %xmm0, %noreg
+ %xmm0 = VSQRTSDZr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VSQRTSSm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTSSZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSQRTSSm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VSQRTSSZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VSQRTSSr %xmm0, %noreg
+ %xmm0 = VSQRTSSZr %xmm0, %noreg
+ ; CHECK: %xmm0 = VSQRTSSr_Int %xmm0, %noreg
+ %xmm0 = VSQRTSSZr_Int %xmm0, %noreg
; CHECK: %rdi = VCVTSD2SI64rr %xmm0
%rdi = VCVTSD2SI64Zrr %xmm0
; CHECK: %edi = VCVTSD2SIrr %xmm0
%edi = VCVTSD2SIZrr %xmm0
- ; CHECK: %xmm0 = VCVTSD2SSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSD2SSZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSD2SSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSD2SSZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSD2SSrr %xmm0, _
- %xmm0 = VCVTSD2SSZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSD2SSrr %xmm0, _
- %xmm0 = VCVTSD2SSZrr_Int %xmm0, _
- ; CHECK: %xmm0 = VCVTSI2SDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI2SDZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI2SDZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSI2SDrr %xmm0, _
- %xmm0 = VCVTSI2SDZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SDrr %xmm0, _
- %xmm0 = VCVTSI2SDZrr_Int %xmm0, _
- ; CHECK: %xmm0 = VCVTSI2SSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI2SSZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SSrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI2SSZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSI2SSrr %xmm0, _
- %xmm0 = VCVTSI2SSZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SSrr %xmm0, _
- %xmm0 = VCVTSI2SSZrr_Int %xmm0, _
- ; CHECK: %xmm0 = VCVTSI2SD64rm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI642SDZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SD64rm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI642SDZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSI2SD64rr %xmm0, _
- %xmm0 = VCVTSI642SDZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SD64rr %xmm0, _
- %xmm0 = VCVTSI642SDZrr_Int %xmm0, _
- ; CHECK: %xmm0 = VCVTSI2SS64rm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI642SSZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SS64rm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSI642SSZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSI2SS64rr %xmm0, _
- %xmm0 = VCVTSI642SSZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSI2SS64rr %xmm0, _
- %xmm0 = VCVTSI642SSZrr_Int %xmm0, _
- ; CHECK: %xmm0 = VCVTSS2SDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSS2SDZrm %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = Int_VCVTSS2SDrm %xmm0, %rdi, 1, _, 0, _
- %xmm0 = VCVTSS2SDZrm_Int %xmm0, %rdi, 1, _, 0, _
- ; CHECK: %xmm0 = VCVTSS2SDrr %xmm0, _
- %xmm0 = VCVTSS2SDZrr %xmm0, _
- ; CHECK: %xmm0 = Int_VCVTSS2SDrr %xmm0, _
- %xmm0 = VCVTSS2SDZrr_Int %xmm0, _
- ; CHECK: %rdi = VCVTSS2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTSS2SI64Zrm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %xmm0 = VCVTSD2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSD2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSD2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSD2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSD2SSrr %xmm0, %noreg
+ %xmm0 = VCVTSD2SSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSD2SSrr %xmm0, %noreg
+ %xmm0 = VCVTSD2SSZrr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SDrr %xmm0, %noreg
+ %xmm0 = VCVTSI2SDZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SDrr %xmm0, %noreg
+ %xmm0 = VCVTSI2SDZrr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SSrr %xmm0, %noreg
+ %xmm0 = VCVTSI2SSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SSrr %xmm0, %noreg
+ %xmm0 = VCVTSI2SSZrr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SD64rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI642SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SD64rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI642SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SD64rr %xmm0, %noreg
+ %xmm0 = VCVTSI642SDZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SD64rr %xmm0, %noreg
+ %xmm0 = VCVTSI642SDZrr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SS64rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI642SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SS64rm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSI642SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSI2SS64rr %xmm0, %noreg
+ %xmm0 = VCVTSI642SSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSI2SS64rr %xmm0, %noreg
+ %xmm0 = VCVTSI642SSZrr_Int %xmm0, %noreg
+ ; CHECK: %xmm0 = VCVTSS2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSS2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSS2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
+ %xmm0 = VCVTSS2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm0 = VCVTSS2SDrr %xmm0, %noreg
+ %xmm0 = VCVTSS2SDZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = Int_VCVTSS2SDrr %xmm0, %noreg
+ %xmm0 = VCVTSS2SDZrr_Int %xmm0, %noreg
+ ; CHECK: %rdi = VCVTSS2SI64rm %rdi, %xmm0, 1, %noreg, 0
+ %rdi = VCVTSS2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %rdi = VCVTSS2SI64rr %xmm0
%rdi = VCVTSS2SI64Zrr %xmm0
- ; CHECK: %edi = VCVTSS2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTSS2SIZrm %rdi, %xmm0, 1, _, 0
+ ; CHECK: %edi = VCVTSS2SIrm %rdi, %xmm0, 1, %noreg, 0
+ %edi = VCVTSS2SIZrm %rdi, %xmm0, 1, %noreg, 0
; CHECK: %edi = VCVTSS2SIrr %xmm0
%edi = VCVTSS2SIZrr %xmm0
- ; CHECK: %rdi = VCVTTSD2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm0, 1, _, 0
- ; CHECK: %rdi = Int_VCVTTSD2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm0, 1, _, 0
+ ; CHECK: %rdi = VCVTTSD2SI64rm %rdi, %xmm0, 1, %noreg, 0
+ %rdi = VCVTTSD2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
+ ; CHECK: %rdi = Int_VCVTTSD2SI64rm %rdi, %xmm0, 1, %noreg, 0
+ %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
; CHECK: %rdi = VCVTTSD2SI64rr %xmm0
%rdi = VCVTTSD2SI64Zrr %xmm0
; CHECK: %rdi = Int_VCVTTSD2SI64rr %xmm0
%rdi = VCVTTSD2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSD2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm0, 1, _, 0
- ; CHECK: %edi = Int_VCVTTSD2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm0, 1, _, 0
+ ; CHECK: %edi = VCVTTSD2SIrm %rdi, %xmm0, 1, %noreg, 0
+ %edi = VCVTTSD2SIZrm %rdi, %xmm0, 1, %noreg, 0
+ ; CHECK: %edi = Int_VCVTTSD2SIrm %rdi, %xmm0, 1, %noreg, 0
+ %edi = VCVTTSD2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
; CHECK: %edi = VCVTTSD2SIrr %xmm0
%edi = VCVTTSD2SIZrr %xmm0
; CHECK: %edi = Int_VCVTTSD2SIrr %xmm0
%edi = VCVTTSD2SIZrr_Int %xmm0
- ; CHECK: %rdi = VCVTTSS2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm0, 1, _, 0
- ; CHECK: %rdi = Int_VCVTTSS2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm0, 1, _, 0
+ ; CHECK: %rdi = VCVTTSS2SI64rm %rdi, %xmm0, 1, %noreg, 0
+ %rdi = VCVTTSS2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
+ ; CHECK: %rdi = Int_VCVTTSS2SI64rm %rdi, %xmm0, 1, %noreg, 0
+ %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
; CHECK: %rdi = VCVTTSS2SI64rr %xmm0
%rdi = VCVTTSS2SI64Zrr %xmm0
; CHECK: %rdi = Int_VCVTTSS2SI64rr %xmm0
%rdi = VCVTTSS2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSS2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm0, 1, _, 0
- ; CHECK: %edi = Int_VCVTTSS2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm0, 1, _, 0
+ ; CHECK: %edi = VCVTTSS2SIrm %rdi, %xmm0, 1, %noreg, 0
+ %edi = VCVTTSS2SIZrm %rdi, %xmm0, 1, %noreg, 0
+ ; CHECK: %edi = Int_VCVTTSS2SIrm %rdi, %xmm0, 1, %noreg, 0
+ %edi = VCVTTSS2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
; CHECK: %edi = VCVTTSS2SIrr %xmm0
%edi = VCVTTSS2SIZrr %xmm0
; CHECK: %edi = Int_VCVTTSS2SIrr %xmm0
%edi = VCVTTSS2SIZrr_Int %xmm0
; CHECK: %xmm0 = VMOV64toSDrr %rdi
%xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2SSrm %rip, _, _, _, _
- %xmm0 = VMOVDI2SSZrm %rip, _, _, _, _
+ ; CHECK: %xmm0 = VMOVDI2SSrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VMOVDI2SSrr %eax
%xmm0 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDmr %rdi, %xmm0, _, _, _, _
- VMOVSDZmr %rdi, %xmm0, _, _, _, _
- ; CHECK: %xmm0 = VMOVSDrm %rip, _, _, _, _
- %xmm0 = VMOVSDZrm %rip, _, _, _, _
- ; CHECK: %xmm0 = VMOVSDrr %xmm0, _
- %xmm0 = VMOVSDZrr %xmm0, _
- ; CHECK: %xmm0 = VMOVSDrr_REV %xmm0, _
- %xmm0 = VMOVSDZrr_REV %xmm0, _
+ ; CHECK: VMOVSDmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVSDZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VMOVSDrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VMOVSDrr %xmm0, %noreg
+ %xmm0 = VMOVSDZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = VMOVSDrr_REV %xmm0, %noreg
+ %xmm0 = VMOVSDZrr_REV %xmm0, %noreg
; CHECK: %rax = VMOVSDto64rr %xmm0
%rax = VMOVSDto64Zrr %xmm0
- ; CHECK: VMOVSDto64mr %rdi, %xmm0, _, _, _, _
- VMOVSDto64Zmr %rdi, %xmm0, _, _, _, _
- ; CHECK: VMOVSSmr %rdi, %xmm0, _, _, _, _
- VMOVSSZmr %rdi, %xmm0, _, _, _, _
- ; CHECK: %xmm0 = VMOVSSrm %rip, _, _, _, _
- %xmm0 = VMOVSSZrm %rip, _, _, _, _
- ; CHECK: %xmm0 = VMOVSSrr %xmm0, _
- %xmm0 = VMOVSSZrr %xmm0, _
- ; CHECK: %xmm0 = VMOVSSrr_REV %xmm0, _
- %xmm0 = VMOVSSZrr_REV %xmm0, _
- ; CHECK: VMOVSS2DImr %rdi, %xmm0, _, _, _, _
- VMOVSS2DIZmr %rdi, %xmm0, _, _, _, _
+ ; CHECK: VMOVSDto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVSDto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: VMOVSSmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVSSZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VMOVSSrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VMOVSSrr %xmm0, %noreg
+ %xmm0 = VMOVSSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = VMOVSSrr_REV %xmm0, %noreg
+ %xmm0 = VMOVSSZrr_REV %xmm0, %noreg
+ ; CHECK: VMOVSS2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVSS2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
; CHECK: %eax = VMOVSS2DIrr %xmm0
%eax = VMOVSS2DIZrr %xmm0
; CHECK: %xmm0 = VMOV64toPQIrr %rdi
%xmm0 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm0 = VMOV64toPQIrm %rdi, _, _, _, _
- %xmm0 = VMOV64toPQIZrm %rdi, _, _, _, _
+ ; CHECK: %xmm0 = VMOV64toPQIrm %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VMOV64toSDrr %rdi
%xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2PDIrm %rip, _, _, _, _
- %xmm0 = VMOVDI2PDIZrm %rip, _, _, _, _
+ ; CHECK: %xmm0 = VMOVDI2PDIrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VMOVDI2PDIrr %edi
%xmm0 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm0 = VMOVLHPSrr %xmm0, _
- %xmm0 = VMOVLHPSZrr %xmm0, _
- ; CHECK: %xmm0 = VMOVHLPSrr %xmm0, _
- %xmm0 = VMOVHLPSZrr %xmm0, _
- ; CHECK: VMOVPDI2DImr %rdi, %xmm0, _, _, _, _
- VMOVPDI2DIZmr %rdi, %xmm0, _, _, _, _
+ ; CHECK: %xmm0 = VMOVLHPSrr %xmm0, %noreg
+ %xmm0 = VMOVLHPSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = VMOVHLPSrr %xmm0, %noreg
+ %xmm0 = VMOVHLPSZrr %xmm0, %noreg
+ ; CHECK: VMOVPDI2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVPDI2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
; CHECK: %edi = VMOVPDI2DIrr %xmm0
%edi = VMOVPDI2DIZrr %xmm0
; CHECK: %xmm0 = VMOVPQI2QIrr %xmm0
%xmm0 = VMOVPQI2QIZrr %xmm0
- ; CHECK: VMOVPQI2QImr %rdi, %xmm0, _, _, _, _
- VMOVPQI2QIZmr %rdi, %xmm0, _, _, _, _
+ ; CHECK: VMOVPQI2QImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVPQI2QIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
; CHECK: %rdi = VMOVPQIto64rr %xmm0
%rdi = VMOVPQIto64Zrr %xmm0
- ; CHECK: VMOVPQIto64mr %rdi, %xmm0, _, _, _, _
- VMOVPQIto64Zmr %rdi, %xmm0, _, _, _, _
- ; CHECK: %xmm0 = VMOVQI2PQIrm %rip, _, _, _, _
- %xmm0 = VMOVQI2PQIZrm %rip, _, _, _, _
+ ; CHECK: VMOVPQIto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ VMOVPQIto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VMOVQI2PQIrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm0 = VMOVZPQILo2PQIrr %xmm0
%xmm0 = VMOVZPQILo2PQIZrr %xmm0
- ; CHECK: Int_VCOMISDrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- Int_VCOMISDZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VCOMISDrr %xmm0, %xmm1, implicit-def %eflags
Int_VCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: Int_VCOMISSrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- Int_VCOMISSZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VCOMISSrr %xmm0, %xmm1, implicit-def %eflags
Int_VCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: Int_VUCOMISDrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- Int_VUCOMISDZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VUCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VUCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VUCOMISDrr %xmm0, %xmm1, implicit-def %eflags
Int_VUCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: Int_VUCOMISSrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- Int_VUCOMISSZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VUCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VUCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VUCOMISSrr %xmm0, %xmm1, implicit-def %eflags
Int_VUCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- VCOMISDZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VCOMISDrr %xmm0, %xmm1, implicit-def %eflags
VCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- VCOMISSZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VCOMISSrr %xmm0, %xmm1, implicit-def %eflags
VCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- VUCOMISDZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VUCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VUCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VUCOMISDrr %xmm0, %xmm1, implicit-def %eflags
VUCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
- VUCOMISSZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VUCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VUCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VUCOMISSrr %xmm0, %xmm1, implicit-def %eflags
VUCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VEXTRACTPSmr %rdi, 1, _, 0, _, %xmm0, _
- VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm0, _
- ; CHECK: %eax = VEXTRACTPSrr %xmm0, _
- %eax = VEXTRACTPSZrr %xmm0, _
- ; CHECK: %xmm0 = VINSERTPSrm %xmm0, %rdi, _, _, _, _, _
- %xmm0 = VINSERTPSZrm %xmm0, %rdi, _, _, _, _, _
- ; CHECK: %xmm0 = VINSERTPSrr %xmm0, %xmm0, _
- %xmm0 = VINSERTPSZrr %xmm0, %xmm0, _
+ ; CHECK: VEXTRACTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
+ VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
+ ; CHECK: %eax = VEXTRACTPSrr %xmm0, %noreg
+ %eax = VEXTRACTPSZrr %xmm0, %noreg
+ ; CHECK: %xmm0 = VINSERTPSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm0 = VINSERTPSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm0 = VINSERTPSrr %xmm0, %xmm0, %noreg
+ %xmm0 = VINSERTPSZrr %xmm0, %xmm0, %noreg
RET 0, %zmm0, %zmm1
...
@@ -2326,878 +2326,878 @@ body: |
name: evex_z256_to_evex_test
body: |
bb.0:
- ; CHECK: VMOVAPDZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVAPDZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVAPDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVAPDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVAPDZ256rr %ymm16
%ymm16 = VMOVAPDZ256rr %ymm16
; CHECK: %ymm16 = VMOVAPDZ256rr_REV %ymm16
%ymm16 = VMOVAPDZ256rr_REV %ymm16
- ; CHECK: VMOVAPSZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVAPSZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVAPSZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVAPSZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVAPSZ256rr %ymm16
%ymm16 = VMOVAPSZ256rr %ymm16
; CHECK: %ymm16 = VMOVAPSZ256rr_REV %ymm16
%ymm16 = VMOVAPSZ256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVDDUPZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDDUPZ256rr %ymm16
%ymm16 = VMOVDDUPZ256rr %ymm16
- ; CHECK: VMOVDQA32Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQA32Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQA32Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQA32Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQA32Z256rr %ymm16
%ymm16 = VMOVDQA32Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQA32Z256rr_REV %ymm16
%ymm16 = VMOVDQA32Z256rr_REV %ymm16
- ; CHECK: VMOVDQA64Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQA64Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQA64Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQA64Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQA64Z256rr %ymm16
%ymm16 = VMOVDQA64Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQA64Z256rr_REV %ymm16
%ymm16 = VMOVDQA64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU16Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQU16Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQU16Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQU16Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQU16Z256rr %ymm16
%ymm16 = VMOVDQU16Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQU16Z256rr_REV %ymm16
%ymm16 = VMOVDQU16Z256rr_REV %ymm16
- ; CHECK: VMOVDQU32Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQU32Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQU32Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQU32Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQU32Z256rr %ymm16
%ymm16 = VMOVDQU32Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQU32Z256rr_REV %ymm16
%ymm16 = VMOVDQU32Z256rr_REV %ymm16
- ; CHECK: VMOVDQU64Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQU64Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQU64Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQU64Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQU64Z256rr %ymm16
%ymm16 = VMOVDQU64Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQU64Z256rr_REV %ymm16
%ymm16 = VMOVDQU64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU8Z256mr %rdi, 1, _, 0, _, %ymm16
- VMOVDQU8Z256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVDQU8Z256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVDQU8Z256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVDQU8Z256rr %ymm16
%ymm16 = VMOVDQU8Z256rr %ymm16
; CHECK: %ymm16 = VMOVDQU8Z256rr_REV %ymm16
%ymm16 = VMOVDQU8Z256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVNTDQAZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVNTDQAZ256rm %rip, 1, _, %rax, _
- ; CHECK: VMOVNTDQZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVNTDQZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: VMOVNTPDZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVNTPDZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: VMOVNTPSZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVNTPSZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVSHDUPZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVSHDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVSHDUPZ256rr %ymm16
%ymm16 = VMOVSHDUPZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVSLDUPZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVSLDUPZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVSLDUPZ256rr %ymm16
%ymm16 = VMOVSLDUPZ256rr %ymm16
- ; CHECK: VMOVUPDZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVUPDZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VMOVUPDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VMOVUPDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMOVUPDZ256rr %ymm16
%ymm16 = VMOVUPDZ256rr %ymm16
; CHECK: %ymm16 = VMOVUPDZ256rr_REV %ymm16
%ymm16 = VMOVUPDZ256rr_REV %ymm16
- ; CHECK: VMOVUPSZ256mr %rdi, 1, _, 0, _, %ymm16
- VMOVUPSZ256mr %rdi, 1, _, 0, _, %ymm16
- ; CHECK: %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
+ ; CHECK: %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPANDDZ256rr %ymm16, %ymm1
%ymm16 = VPANDDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPANDQZ256rr %ymm16, %ymm1
%ymm16 = VPANDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPANDNDZ256rr %ymm16, %ymm1
%ymm16 = VPANDNDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPANDNQZ256rr %ymm16, %ymm1
%ymm16 = VPANDNQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPAVGBZ256rr %ymm16, %ymm1
%ymm16 = VPAVGBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPAVGWZ256rr %ymm16, %ymm1
%ymm16 = VPAVGWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDBZ256rr %ymm16, %ymm1
%ymm16 = VPADDBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDDZ256rr %ymm16, %ymm1
%ymm16 = VPADDDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDQZ256rr %ymm16, %ymm1
%ymm16 = VPADDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDSBZ256rr %ymm16, %ymm1
%ymm16 = VPADDSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDSWZ256rr %ymm16, %ymm1
%ymm16 = VPADDSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDUSBZ256rr %ymm16, %ymm1
%ymm16 = VPADDUSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDUSWZ256rr %ymm16, %ymm1
%ymm16 = VPADDUSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPADDWZ256rr %ymm16, %ymm1
%ymm16 = VPADDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMULPDZ256rr %ymm16, %ymm1
%ymm16 = VMULPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMULPSZ256rr %ymm16, %ymm1
%ymm16 = VMULPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VORPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VORPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VORPDZ256rr %ymm16, %ymm1
%ymm16 = VORPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VORPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VORPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VORPSZ256rr %ymm16, %ymm1
%ymm16 = VORPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1
%ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMADDWDZ256rr %ymm16, %ymm1
%ymm16 = VPMADDWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXSBZ256rr %ymm16, %ymm1
%ymm16 = VPMAXSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXSDZ256rr %ymm16, %ymm1
%ymm16 = VPMAXSDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXSWZ256rr %ymm16, %ymm1
%ymm16 = VPMAXSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXUBZ256rr %ymm16, %ymm1
%ymm16 = VPMAXUBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXUDZ256rr %ymm16, %ymm1
%ymm16 = VPMAXUDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMAXUWZ256rr %ymm16, %ymm1
%ymm16 = VPMAXUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINSBZ256rr %ymm16, %ymm1
%ymm16 = VPMINSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINSDZ256rr %ymm16, %ymm1
%ymm16 = VPMINSDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINSWZ256rr %ymm16, %ymm1
%ymm16 = VPMINSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINUBZ256rr %ymm16, %ymm1
%ymm16 = VPMINUBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINUDZ256rr %ymm16, %ymm1
%ymm16 = VPMINUDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMINUWZ256rr %ymm16, %ymm1
%ymm16 = VPMINUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULDQZ256rr %ymm16, %ymm1
%ymm16 = VPMULDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1
%ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULHUWZ256rr %ymm16, %ymm1
%ymm16 = VPMULHUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULHWZ256rr %ymm16, %ymm1
%ymm16 = VPMULHWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULLDZ256rr %ymm16, %ymm1
%ymm16 = VPMULLDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULLWZ256rr %ymm16, %ymm1
%ymm16 = VPMULLWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMULUDQZ256rr %ymm16, %ymm1
%ymm16 = VPMULUDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPORDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPORDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPORDZ256rr %ymm16, %ymm1
%ymm16 = VPORDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPORQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPORQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPORQZ256rr %ymm16, %ymm1
%ymm16 = VPORQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBBZ256rr %ymm16, %ymm1
%ymm16 = VPSUBBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBDZ256rr %ymm16, %ymm1
%ymm16 = VPSUBDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBQZ256rr %ymm16, %ymm1
%ymm16 = VPSUBQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBSBZ256rr %ymm16, %ymm1
%ymm16 = VPSUBSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBSWZ256rr %ymm16, %ymm1
%ymm16 = VPSUBSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1
%ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1
%ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSUBWZ256rr %ymm16, %ymm1
%ymm16 = VPSUBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPXORDZ256rr %ymm16, %ymm1
%ymm16 = VPXORDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPXORQZ256rr %ymm16, %ymm1
%ymm16 = VPXORQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VADDPDZ256rr %ymm16, %ymm1
%ymm16 = VADDPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VADDPSZ256rr %ymm16, %ymm1
%ymm16 = VADDPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VANDNPDZ256rr %ymm16, %ymm1
%ymm16 = VANDNPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VANDNPSZ256rr %ymm16, %ymm1
%ymm16 = VANDNPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VANDPDZ256rr %ymm16, %ymm1
%ymm16 = VANDPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VANDPSZ256rr %ymm16, %ymm1
%ymm16 = VANDPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VDIVPDZ256rr %ymm16, %ymm1
%ymm16 = VDIVPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VDIVPSZ256rr %ymm16, %ymm1
%ymm16 = VDIVPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMAXCPDZ256rr %ymm16, %ymm1
%ymm16 = VMAXCPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMAXCPSZ256rr %ymm16, %ymm1
%ymm16 = VMAXCPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMAXPDZ256rr %ymm16, %ymm1
%ymm16 = VMAXPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMAXPSZ256rr %ymm16, %ymm1
%ymm16 = VMAXPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMINCPDZ256rr %ymm16, %ymm1
%ymm16 = VMINCPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMINCPSZ256rr %ymm16, %ymm1
%ymm16 = VMINCPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMINPDZ256rr %ymm16, %ymm1
%ymm16 = VMINPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VMINPSZ256rr %ymm16, %ymm1
%ymm16 = VMINPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VXORPDZ256rr %ymm16, %ymm1
%ymm16 = VXORPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VXORPSZ256rr %ymm16, %ymm1
%ymm16 = VXORPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1
%ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1
%ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1
%ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1
%ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1
%ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1
%ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1
%ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1
%ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VSUBPDZ256rr %ymm16, %ymm1
%ymm16 = VSUBPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VSUBPSZ256rr %ymm16, %ymm1
%ymm16 = VSUBPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1
%ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
- %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, _, 0, _
+ ; CHECK: %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
+ %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2
%ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2
; CHECK: %ymm16 = VPSRADZ256ri %ymm16, 7
%ymm16 = VPSRADZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRADZ256rr %ymm16, %xmm1
%ymm16 = VPSRADZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRAVDZ256rr %ymm16, %ymm1
%ymm16 = VPSRAVDZ256rr %ymm16, %ymm1
; CHECK: %ymm16 = VPSRAWZ256ri %ymm16, 7
%ymm16 = VPSRAWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRAWZ256rr %ymm16, %xmm1
%ymm16 = VPSRAWZ256rr %ymm16, %xmm1
; CHECK: %ymm16 = VPSRLDQZ256rr %ymm16, %ymm1
%ymm16 = VPSRLDQZ256rr %ymm16, %ymm1
; CHECK: %ymm16 = VPSRLDZ256ri %ymm16, 7
%ymm16 = VPSRLDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRLDZ256rr %ymm16, %xmm1
%ymm16 = VPSRLDZ256rr %ymm16, %xmm1
; CHECK: %ymm16 = VPSRLQZ256ri %ymm16, 7
%ymm16 = VPSRLQZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRLQZ256rr %ymm16, %xmm1
%ymm16 = VPSRLQZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRLVDZ256rr %ymm16, %ymm1
%ymm16 = VPSRLVDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRLVQZ256rr %ymm16, %ymm1
%ymm16 = VPSRLVQZ256rr %ymm16, %ymm1
; CHECK: %ymm16 = VPSRLWZ256ri %ymm16, 7
%ymm16 = VPSRLWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSRLWZ256rr %ymm16, %xmm1
%ymm16 = VPSRLWZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPMOVSXBDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXBDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXBDZ256rr %xmm0
%ymm16 = VPMOVSXBDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXBQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXBQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXBQZ256rr %xmm0
%ymm16 = VPMOVSXBQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXBWZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXBWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXBWZ256rr %xmm0
%ymm16 = VPMOVSXBWZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXDQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXDQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXDQZ256rr %xmm0
%ymm16 = VPMOVSXDQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXWDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXWDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXWDZ256rr %xmm0
%ymm16 = VPMOVSXWDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXWQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVSXWQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVSXWQZ256rr %xmm0
%ymm16 = VPMOVSXWQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXBDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXBDZ256rr %xmm0
%ymm16 = VPMOVZXBDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXBQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXBQZ256rr %xmm0
%ymm16 = VPMOVZXBQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBWZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXBWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXBWZ256rr %xmm0
%ymm16 = VPMOVZXBWZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXDQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXDQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXDQZ256rr %xmm0
%ymm16 = VPMOVZXDQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXWDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXWDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXWDZ256rr %xmm0
%ymm16 = VPMOVZXWDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXWQZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPMOVZXWQZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPMOVZXWQZ256rr %xmm0
%ymm16 = VPMOVZXWQZ256rr %xmm0
- ; CHECK: %ymm16 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VBROADCASTF32X2Z256r %xmm16
%ymm16 = VBROADCASTF32X2Z256r %xmm16
- ; CHECK: %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
- ; CHECK: %ymm16 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VBROADCASTSDZ256r %xmm0
%ymm16 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VBROADCASTSSZ256r %xmm0
%ymm16 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
- %ymm16 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPBROADCASTBZ256r %xmm0
%ymm16 = VPBROADCASTBZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTDZ256m %rip, 1, _, %rax, _
- %ymm16 = VPBROADCASTDZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPBROADCASTDZ256r %xmm0
%ymm16 = VPBROADCASTDZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTWZ256m %rip, 1, _, %rax, _
- %ymm16 = VPBROADCASTWZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPBROADCASTWZ256r %xmm0
%ymm16 = VPBROADCASTWZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
- ; CHECK: %ymm16 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
- %ymm16 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VBROADCASTI32X2Z256r %xmm16
%ymm16 = VBROADCASTI32X2Z256r %xmm16
- ; CHECK: %ymm16 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
- %ymm16 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPBROADCASTQZ256r %xmm0
%ymm16 = VPBROADCASTQZ256r %xmm0
- ; CHECK: %ymm16 = VPABSBZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPABSBZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPABSBZ256rr %ymm16
%ymm16 = VPABSBZ256rr %ymm16
- ; CHECK: %ymm16 = VPABSDZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPABSDZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPABSDZ256rr %ymm16
%ymm16 = VPABSDZ256rr %ymm16
- ; CHECK: %ymm16 = VPABSWZ256rm %rip, 1, _, %rax, _
- %ymm16 = VPABSWZ256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPABSWZ256rr %ymm16
%ymm16 = VPABSWZ256rr %ymm16
- ; CHECK: %ymm16 = VPSADBWZ256rm %ymm16, 1, _, %rax, _, _
- %ymm16 = VPSADBWZ256rm %ymm16, 1, _, %rax, _, _
+ ; CHECK: %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg
+ %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg
; CHECK: %ymm16 = VPSADBWZ256rr %ymm16, %ymm1
%ymm16 = VPSADBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, _, 0, _
- %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, _, 0, _
+ ; CHECK: %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
+ %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VPERMDZ256rr %ymm1, %ymm16
%ymm16 = VPERMDZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMILPDZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPERMILPDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPERMILPDZ256ri %ymm16, 7
%ymm16 = VPERMILPDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, _, 0, _
- %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, _, 0, _
+ ; CHECK: %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
+ %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VPERMILPDZ256rr %ymm1, %ymm16
%ymm16 = VPERMILPDZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMILPSZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPERMILPSZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPERMILPSZ256ri %ymm16, 7
%ymm16 = VPERMILPSZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, _, 0, _
- %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, _, 0, _
+ ; CHECK: %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
+ %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VPERMILPSZ256rr %ymm1, %ymm16
%ymm16 = VPERMILPSZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMPDZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPERMPDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPERMPDZ256ri %ymm16, 7
%ymm16 = VPERMPDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, _, 0, _
- %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, _, 0, _
+ ; CHECK: %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
+ %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VPERMPSZ256rr %ymm1, %ymm16
%ymm16 = VPERMPSZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMQZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPERMQZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPERMQZ256ri %ymm16, 7
%ymm16 = VPERMQZ256ri %ymm16, 7
; CHECK: %ymm16 = VPSLLDQZ256rr %ymm16, 14
%ymm16 = VPSLLDQZ256rr %ymm16, 14
; CHECK: %ymm16 = VPSLLDZ256ri %ymm16, 7
%ymm16 = VPSLLDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSLLDZ256rr %ymm16, 14
%ymm16 = VPSLLDZ256rr %ymm16, 14
; CHECK: %ymm16 = VPSLLQZ256ri %ymm16, 7
%ymm16 = VPSLLQZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSLLQZ256rr %ymm16, 14
%ymm16 = VPSLLQZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSLLVDZ256rr %ymm16, 14
%ymm16 = VPSLLVDZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSLLVQZ256rr %ymm16, 14
%ymm16 = VPSLLVQZ256rr %ymm16, 14
; CHECK: %ymm16 = VPSLLWZ256ri %ymm16, 7
%ymm16 = VPSLLWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, _, %rax, _
- %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
+ %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %ymm16 = VPSLLWZ256rr %ymm16, 14
%ymm16 = VPSLLWZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTDQ2PDZ256rr %xmm0
%ymm16 = VCVTDQ2PDZ256rr %xmm0
- ; CHECK: %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTDQ2PSZ256rr %ymm16
%ymm16 = VCVTDQ2PSZ256rr %ymm16
- ; CHECK: %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, _, 0
- %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2DQZ256rr %ymm16
%xmm0 = VCVTPD2DQZ256rr %ymm16
- ; CHECK: %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, _, 0
- %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %xmm0 = VCVTPD2PSZ256rr %ymm16
%xmm0 = VCVTPD2PSZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTPS2DQZ256rr %ymm16
%ymm16 = VCVTPS2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTPS2PDZ256rr %xmm0
%ymm16 = VCVTPS2PDZ256rr %xmm0
- ; CHECK: VCVTPS2PHZ256mr %rdi, %ymm16, 1, _, 0, _, _
- VCVTPS2PHZ256mr %rdi, %ymm16, 1, _, 0, _, _
- ; CHECK: %xmm0 = VCVTPS2PHZ256rr %ymm16, _
- %xmm0 = VCVTPS2PHZ256rr %ymm16, _
- ; CHECK: %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg
+ VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg
+ ; CHECK: %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg
+ %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg
+ ; CHECK: %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTPH2PSZ256rr %xmm0
%ymm16 = VCVTPH2PSZ256rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, _, 0
- %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %xmm0 = VCVTTPD2DQZ256rr %ymm16
%xmm0 = VCVTTPD2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, _, 0
- %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, _, 0
+ ; CHECK: %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
+ %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
; CHECK: %ymm16 = VCVTTPS2DQZ256rr %ymm16
%ymm16 = VCVTTPS2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VSQRTPDZ256m %rdi, _, _, _, _
- %ymm16 = VSQRTPDZ256m %rdi, _, _, _, _
+ ; CHECK: %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm16 = VSQRTPDZ256r %ymm16
%ymm16 = VSQRTPDZ256r %ymm16
- ; CHECK: %ymm16 = VSQRTPSZ256m %rdi, _, _, _, _
- %ymm16 = VSQRTPSZ256m %rdi, _, _, _, _
+ ; CHECK: %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm16 = VSQRTPSZ256r %ymm16
%ymm16 = VSQRTPSZ256r %ymm16
- ; CHECK: %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, _, _, _, _, _
- %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, _, _, _, _, _
- ; CHECK: %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, _
- %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, _
- ; CHECK: %ymm16 = VMOVUPSZ256rm %rdi, 1, _, 0, _
- %ymm16 = VMOVUPSZ256rm %rdi, 1, _, 0, _
+ ; CHECK: %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg
+ %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg
+ ; CHECK: %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
+ %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %ymm16 = VMOVUPSZ256rr %ymm16
%ymm16 = VMOVUPSZ256rr %ymm16
; CHECK: %ymm16 = VMOVUPSZ256rr_REV %ymm16
%ymm16 = VMOVUPSZ256rr_REV %ymm16
- ; CHECK: %ymm16 = VPSHUFBZ256rm %ymm16, _, _, _, _, _
- %ymm16 = VPSHUFBZ256rm %ymm16, _, _, _, _, _
+ ; CHECK: %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %ymm16 = VPSHUFBZ256rr %ymm16, %ymm1
%ymm16 = VPSHUFBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSHUFDZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPSHUFDZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPSHUFDZ256ri %ymm16, -24
%ymm16 = VPSHUFDZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VPSHUFHWZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPSHUFHWZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPSHUFHWZ256ri %ymm16, -24
%ymm16 = VPSHUFHWZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VPSHUFLWZ256mi %rdi, 1, _, 0, _, _
- %ymm16 = VPSHUFLWZ256mi %rdi, 1, _, 0, _, _
+ ; CHECK: %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %ymm16 = VPSHUFLWZ256ri %ymm16, -24
%ymm16 = VPSHUFLWZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VSHUFPDZ256rmi %ymm16, _, _, _, _, _, _
- %ymm16 = VSHUFPDZ256rmi %ymm16, _, _, _, _, _, _
- ; CHECK: %ymm16 = VSHUFPDZ256rri %ymm16, _, _
- %ymm16 = VSHUFPDZ256rri %ymm16, _, _
- ; CHECK: %ymm16 = VSHUFPSZ256rmi %ymm16, _, _, _, _, _, _
- %ymm16 = VSHUFPSZ256rmi %ymm16, _, _, _, _, _, _
- ; CHECK: %ymm16 = VSHUFPSZ256rri %ymm16, _, _
- %ymm16 = VSHUFPSZ256rri %ymm16, _, _
+ ; CHECK: %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg
+ %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg
+ ; CHECK: %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg
+ %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg
RET 0, %zmm0, %zmm1
...
@@ -3208,80 +3208,80 @@ body: |
name: evex_z128_to_evex_test
body: |
bb.0:
- ; CHECK: VMOVAPDZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVAPDZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVAPDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVAPDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVAPDZ128rr %xmm16
%xmm16 = VMOVAPDZ128rr %xmm16
- ; CHECK: VMOVAPSZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVAPSZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVAPSZ128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVAPSZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVAPSZ128rr %xmm16
%xmm16 = VMOVAPSZ128rr %xmm16
- ; CHECK: VMOVDQA32Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQA32Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQA32Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQA32Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQA32Z128rr %xmm16
%xmm16 = VMOVDQA32Z128rr %xmm16
- ; CHECK: VMOVDQA64Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQA64Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQA64Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQA64Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQA64Z128rr %xmm16
%xmm16 = VMOVDQA64Z128rr %xmm16
- ; CHECK: VMOVDQU16Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQU16Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQU16Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQU16Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQU16Z128rr %xmm16
%xmm16 = VMOVDQU16Z128rr %xmm16
- ; CHECK: VMOVDQU32Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQU32Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQU32Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQU32Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQU32Z128rr %xmm16
%xmm16 = VMOVDQU32Z128rr %xmm16
- ; CHECK: VMOVDQU64Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQU64Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQU64Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQU64Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQU64Z128rr %xmm16
%xmm16 = VMOVDQU64Z128rr %xmm16
- ; CHECK: VMOVDQU8Z128mr %rdi, 1, _, 0, _, %xmm16
- VMOVDQU8Z128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVDQU8Z128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVDQU8Z128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVDQU8Z128rr %xmm16
%xmm16 = VMOVDQU8Z128rr %xmm16
; CHECK: %xmm16 = VMOVDQU8Z128rr_REV %xmm16
%xmm16 = VMOVDQU8Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVNTDQAZ128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVNTDQAZ128rm %rip, 1, _, %rax, _
- ; CHECK: VMOVUPDZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVUPDZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVUPDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVUPDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVUPDZ128rr %xmm16
%xmm16 = VMOVUPDZ128rr %xmm16
; CHECK: %xmm16 = VMOVUPDZ128rr_REV %xmm16
%xmm16 = VMOVUPDZ128rr_REV %xmm16
- ; CHECK: VMOVUPSZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVUPSZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVUPSZ128rm %rip, 1, _, %rax, _
- %xmm16 = VMOVUPSZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMOVUPSZ128rr %xmm16
%xmm16 = VMOVUPSZ128rr %xmm16
; CHECK: %xmm16 = VMOVUPSZ128rr_REV %xmm16
%xmm16 = VMOVUPSZ128rr_REV %xmm16
- ; CHECK: VMOVNTDQZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVNTDQZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: VMOVNTPDZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVNTPDZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: VMOVNTPSZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVNTPSZ128mr %rdi, 1, _, 0, _, %xmm16
+ ; CHECK: VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
; CHECK: %xmm16 = VMOVAPDZ128rr_REV %xmm16
%xmm16 = VMOVAPDZ128rr_REV %xmm16
; CHECK: %xmm16 = VMOVAPSZ128rr_REV %xmm16
@@ -3296,786 +3296,786 @@ body: |
%xmm16 = VMOVDQU32Z128rr_REV %xmm16
; CHECK: %xmm16 = VMOVDQU64Z128rr_REV %xmm16
%xmm16 = VMOVDQU64Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VPMOVSXBDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXBDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXBDZ128rr %xmm16
%xmm16 = VPMOVSXBDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXBQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXBQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXBQZ128rr %xmm16
%xmm16 = VPMOVSXBQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXBWZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXBWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXBWZ128rr %xmm16
%xmm16 = VPMOVSXBWZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXDQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXDQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXDQZ128rr %xmm16
%xmm16 = VPMOVSXDQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXWDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXWDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXWDZ128rr %xmm16
%xmm16 = VPMOVSXWDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXWQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVSXWQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVSXWQZ128rr %xmm16
%xmm16 = VPMOVSXWQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXBDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXBDZ128rr %xmm16
%xmm16 = VPMOVZXBDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXBQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXBQZ128rr %xmm16
%xmm16 = VPMOVZXBQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBWZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXBWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXBWZ128rr %xmm16
%xmm16 = VPMOVZXBWZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXDQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXDQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXDQZ128rr %xmm16
%xmm16 = VPMOVZXDQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXWDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXWDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXWDZ128rr %xmm16
%xmm16 = VPMOVZXWDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXWQZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPMOVZXWQZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMOVZXWQZ128rr %xmm16
%xmm16 = VPMOVZXWQZ128rr %xmm16
- ; CHECK: VMOVHPDZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVHPDZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: VMOVHPSZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVHPSZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: VMOVLPDZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVLPDZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: VMOVLPSZ128mr %rdi, 1, _, 0, _, %xmm16
- VMOVLPSZ128mr %rdi, 1, _, 0, _, %xmm16
- ; CHECK: %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
+ ; CHECK: %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXCPDZ128rr %xmm16, %xmm1
%xmm16 = VMAXCPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXCPSZ128rr %xmm16, %xmm1
%xmm16 = VMAXCPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXPDZ128rr %xmm16, %xmm1
%xmm16 = VMAXPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXPSZ128rr %xmm16, %xmm1
%xmm16 = VMAXPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINCPDZ128rr %xmm16, %xmm1
%xmm16 = VMINCPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINCPSZ128rr %xmm16, %xmm1
%xmm16 = VMINCPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINPDZ128rr %xmm16, %xmm1
%xmm16 = VMINPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINPSZ128rr %xmm16, %xmm1
%xmm16 = VMINPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMULPDZ128rr %xmm16, %xmm1
%xmm16 = VMULPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMULPSZ128rr %xmm16, %xmm1
%xmm16 = VMULPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VORPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VORPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VORPDZ128rr %xmm16, %xmm1
%xmm16 = VORPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VORPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VORPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VORPSZ128rr %xmm16, %xmm1
%xmm16 = VORPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDBZ128rr %xmm16, %xmm1
%xmm16 = VPADDBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDDZ128rr %xmm16, %xmm1
%xmm16 = VPADDDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDQZ128rr %xmm16, %xmm1
%xmm16 = VPADDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDSBZ128rr %xmm16, %xmm1
%xmm16 = VPADDSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDSWZ128rr %xmm16, %xmm1
%xmm16 = VPADDSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDUSBZ128rr %xmm16, %xmm1
%xmm16 = VPADDUSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDUSWZ128rr %xmm16, %xmm1
%xmm16 = VPADDUSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPADDWZ128rr %xmm16, %xmm1
%xmm16 = VPADDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPANDDZ128rr %xmm16, %xmm1
%xmm16 = VPANDDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPANDQZ128rr %xmm16, %xmm1
%xmm16 = VPANDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPANDNDZ128rr %xmm16, %xmm1
%xmm16 = VPANDNDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPANDNQZ128rr %xmm16, %xmm1
%xmm16 = VPANDNQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
%xmm16 = VPAVGBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPAVGWZ128rr %xmm16, %xmm1
%xmm16 = VPAVGWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
%xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
%xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
%xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
%xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
%xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
%xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINSBZ128rr %xmm16, %xmm1
%xmm16 = VPMINSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINSDZ128rr %xmm16, %xmm1
%xmm16 = VPMINSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINSWZ128rr %xmm16, %xmm1
%xmm16 = VPMINSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINUBZ128rr %xmm16, %xmm1
%xmm16 = VPMINUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINUDZ128rr %xmm16, %xmm1
%xmm16 = VPMINUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMINUWZ128rr %xmm16, %xmm1
%xmm16 = VPMINUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULDQZ128rr %xmm16, %xmm1
%xmm16 = VPMULDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
%xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
%xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULHWZ128rr %xmm16, %xmm1
%xmm16 = VPMULHWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULLDZ128rr %xmm16, %xmm1
%xmm16 = VPMULLDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULLWZ128rr %xmm16, %xmm1
%xmm16 = VPMULLWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
%xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPORDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPORDZ128rr %xmm16, %xmm1
%xmm16 = VPORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPORQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPORQZ128rr %xmm16, %xmm1
%xmm16 = VPORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBBZ128rr %xmm16, %xmm1
%xmm16 = VPSUBBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBDZ128rr %xmm16, %xmm1
%xmm16 = VPSUBDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBQZ128rr %xmm16, %xmm1
%xmm16 = VPSUBQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
%xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
%xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
%xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
%xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSUBWZ128rr %xmm16, %xmm1
%xmm16 = VPSUBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VADDPDZ128rr %xmm16, %xmm1
%xmm16 = VADDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VADDPSZ128rr %xmm16, %xmm1
%xmm16 = VADDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VANDNPDZ128rr %xmm16, %xmm1
%xmm16 = VANDNPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VANDNPSZ128rr %xmm16, %xmm1
%xmm16 = VANDNPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VANDPDZ128rr %xmm16, %xmm1
%xmm16 = VANDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VANDPSZ128rr %xmm16, %xmm1
%xmm16 = VANDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VDIVPDZ128rr %xmm16, %xmm1
%xmm16 = VDIVPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VDIVPSZ128rr %xmm16, %xmm1
%xmm16 = VDIVPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPXORDZ128rr %xmm16, %xmm1
%xmm16 = VPXORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPXORQZ128rr %xmm16, %xmm1
%xmm16 = VPXORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VSUBPDZ128rr %xmm16, %xmm1
%xmm16 = VSUBPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VSUBPSZ128rr %xmm16, %xmm1
%xmm16 = VSUBPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VXORPDZ128rr %xmm16, %xmm1
%xmm16 = VXORPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VXORPSZ128rr %xmm16, %xmm1
%xmm16 = VXORPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
%xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
%xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
%xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
%xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
%xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
%xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
%xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
%xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
%xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
%xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
%xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VPSLLDZ128ri %xmm16, 7
%xmm16 = VPSLLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSLLDZ128rr %xmm16, 14
%xmm16 = VPSLLDZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSLLQZ128ri %xmm16, 7
%xmm16 = VPSLLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSLLQZ128rr %xmm16, 14
%xmm16 = VPSLLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSLLVDZ128rr %xmm16, 14
%xmm16 = VPSLLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSLLVQZ128rr %xmm16, 14
%xmm16 = VPSLLVQZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSLLWZ128ri %xmm16, 7
%xmm16 = VPSLLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSLLWZ128rr %xmm16, 14
%xmm16 = VPSLLWZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRADZ128ri %xmm16, 7
%xmm16 = VPSRADZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRADZ128rr %xmm16, 14
%xmm16 = VPSRADZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRAVDZ128rr %xmm16, 14
%xmm16 = VPSRAVDZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRAWZ128ri %xmm16, 7
%xmm16 = VPSRAWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRAWZ128rr %xmm16, 14
%xmm16 = VPSRAWZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRLDQZ128rr %xmm16, 14
%xmm16 = VPSRLDQZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRLDZ128ri %xmm16, 7
%xmm16 = VPSRLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRLDZ128rr %xmm16, 14
%xmm16 = VPSRLDZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRLQZ128ri %xmm16, 7
%xmm16 = VPSRLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRLQZ128rr %xmm16, 14
%xmm16 = VPSRLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRLVDZ128rr %xmm16, 14
%xmm16 = VPSRLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRLVQZ128rr %xmm16, 14
%xmm16 = VPSRLVQZ128rr %xmm16, 14
; CHECK: %xmm16 = VPSRLWZ128ri %xmm16, 7
%xmm16 = VPSRLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPSRLWZ128rr %xmm16, 14
%xmm16 = VPSRLWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPERMILPDZ128mi %rdi, 1, _, 0, _, _
- %xmm16 = VPERMILPDZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm16 = VPERMILPDZ128ri %xmm16, 9
%xmm16 = VPERMILPDZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
%xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPERMILPSZ128mi %rdi, 1, _, 0, _, _
- %xmm16 = VPERMILPSZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm16 = VPERMILPSZ128ri %xmm16, 9
%xmm16 = VPERMILPSZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
%xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTPH2PSZ128rr %xmm16
%xmm16 = VCVTPH2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTDQ2PDZ128rr %xmm16
%xmm16 = VCVTDQ2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTDQ2PSZ128rr %xmm16
%xmm16 = VCVTDQ2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTPD2DQZ128rr %xmm16
%xmm16 = VCVTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTPD2PSZ128rr %xmm16
%xmm16 = VCVTPD2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTPS2DQZ128rr %xmm16
%xmm16 = VCVTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTPS2PDZ128rr %xmm16
%xmm16 = VCVTPS2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTTPD2DQZ128rr %xmm16
%xmm16 = VCVTTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, _, 0
- %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
+ %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %xmm16 = VCVTTPS2DQZ128rr %xmm16
%xmm16 = VCVTTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VSQRTPDZ128m %rdi, _, _, _, _
- %xmm16 = VSQRTPDZ128m %rdi, _, _, _, _
+ ; CHECK: %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VSQRTPDZ128r %xmm16
%xmm16 = VSQRTPDZ128r %xmm16
- ; CHECK: %xmm16 = VSQRTPSZ128m %rdi, _, _, _, _
- %xmm16 = VSQRTPSZ128m %rdi, _, _, _, _
+ ; CHECK: %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VSQRTPSZ128r %xmm16
%xmm16 = VSQRTPSZ128r %xmm16
- ; CHECK: %xmm16 = VMOVDDUPZ128rm %rdi, 1, _, 0, _
- %xmm16 = VMOVDDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VMOVDDUPZ128rr %xmm16
%xmm16 = VMOVDDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSHDUPZ128rm %rdi, 1, _, 0, _
- %xmm16 = VMOVSHDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VMOVSHDUPZ128rr %xmm16
%xmm16 = VMOVSHDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSLDUPZ128rm %rdi, 1, _, 0, _
- %xmm16 = VMOVSLDUPZ128rm %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VMOVSLDUPZ128rr %xmm16
%xmm16 = VMOVSLDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VPSHUFBZ128rm %xmm16, _, _, _, _, _
- %xmm16 = VPSHUFBZ128rm %xmm16, _, _, _, _, _
+ ; CHECK: %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
%xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSHUFDZ128mi %rdi, 1, _, 0, _, _
- %xmm16 = VPSHUFDZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm16 = VPSHUFDZ128ri %xmm16, -24
%xmm16 = VPSHUFDZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFHWZ128mi %rdi, 1, _, 0, _, _
- %xmm16 = VPSHUFHWZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm16 = VPSHUFHWZ128ri %xmm16, -24
%xmm16 = VPSHUFHWZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFLWZ128mi %rdi, 1, _, 0, _, _
- %xmm16 = VPSHUFLWZ128mi %rdi, 1, _, 0, _, _
+ ; CHECK: %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
+ %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
; CHECK: %xmm16 = VPSHUFLWZ128ri %xmm16, -24
%xmm16 = VPSHUFLWZ128ri %xmm16, -24
; CHECK: %xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
%xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSHUFPDZ128rmi %xmm16, _, _, _, _, _, _
- %xmm16 = VSHUFPDZ128rmi %xmm16, _, _, _, _, _, _
- ; CHECK: %xmm16 = VSHUFPDZ128rri %xmm16, _, _
- %xmm16 = VSHUFPDZ128rri %xmm16, _, _
- ; CHECK: %xmm16 = VSHUFPSZ128rmi %xmm16, _, _, _, _, _, _
- %xmm16 = VSHUFPSZ128rmi %xmm16, _, _, _, _, _, _
- ; CHECK: %xmm16 = VSHUFPSZ128rri %xmm16, _, _
- %xmm16 = VSHUFPSZ128rri %xmm16, _, _
- ; CHECK: %xmm16 = VPSADBWZ128rm %xmm16, 1, _, %rax, _, _
- %xmm16 = VPSADBWZ128rm %xmm16, 1, _, %rax, _, _
+ ; CHECK: %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
+ %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
+ ; CHECK: %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
+ %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
+ ; CHECK: %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
+ %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
; CHECK: %xmm16 = VPSADBWZ128rr %xmm16, %xmm1
%xmm16 = VPSADBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VBROADCASTSSZ128m %rip, _, _, _, _
- %xmm16 = VBROADCASTSSZ128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VBROADCASTSSZ128r %xmm16
%xmm16 = VBROADCASTSSZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTBZ128m %rip, _, _, _, _
- %xmm16 = VPBROADCASTBZ128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPBROADCASTBZ128r %xmm16
%xmm16 = VPBROADCASTBZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTDZ128m %rip, _, _, _, _
- %xmm16 = VPBROADCASTDZ128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPBROADCASTDZ128r %xmm16
%xmm16 = VPBROADCASTDZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTQZ128m %rip, _, _, _, _
- %xmm16 = VPBROADCASTQZ128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPBROADCASTQZ128r %xmm16
%xmm16 = VPBROADCASTQZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTWZ128m %rip, _, _, _, _
- %xmm16 = VPBROADCASTWZ128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPBROADCASTWZ128r %xmm16
%xmm16 = VPBROADCASTWZ128r %xmm16
- ; CHECK: %xmm16 = VBROADCASTI32X2Z128m %rip, _, _, _, _
- %xmm16 = VBROADCASTI32X2Z128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VBROADCASTI32X2Z128r %xmm0
%xmm16 = VBROADCASTI32X2Z128r %xmm0
; CHECK: %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
%xmm16 = VCVTPS2PHZ128rr %xmm16, 2
- ; CHECK: VCVTPS2PHZ128mr %rdi, %xmm16, 1, _, 0, _, _
- VCVTPS2PHZ128mr %rdi, %xmm16, 1, _, 0, _, _
- ; CHECK: %xmm16 = VPABSBZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPABSBZ128rm %rip, 1, _, %rax, _
+ ; CHECK: VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
+ VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
+ ; CHECK: %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPABSBZ128rr %xmm16
%xmm16 = VPABSBZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSDZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPABSDZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPABSDZ128rr %xmm16
%xmm16 = VPABSDZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSWZ128rm %rip, 1, _, %rax, _
- %xmm16 = VPABSWZ128rm %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VPABSWZ128rr %xmm16
%xmm16 = VPABSWZ128rr %xmm16
- ; CHECK: %xmm16 = VPALIGNRZ128rmi %xmm16, _, _, _, _, _, _
- %xmm16 = VPALIGNRZ128rmi %xmm16, _, _, _, _, _, _
+ ; CHECK: %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
%xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
- ; CHECK: VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm16, _
- VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm16, _
- ; CHECK: %eax = VEXTRACTPSZrr %xmm16, _
- %eax = VEXTRACTPSZrr %xmm16, _
- ; CHECK: %xmm16 = VINSERTPSZrm %xmm16, %rdi, _, _, _, _, _
- %xmm16 = VINSERTPSZrm %xmm16, %rdi, _, _, _, _, _
- ; CHECK: %xmm16 = VINSERTPSZrr %xmm16, %xmm16, _
- %xmm16 = VINSERTPSZrr %xmm16, %xmm16, _
+ ; CHECK: VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
+ VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
+ ; CHECK: %eax = VEXTRACTPSZrr %xmm16, %noreg
+ %eax = VEXTRACTPSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
+ %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
RET 0, %zmm0, %zmm1
...
@@ -4086,546 +4086,546 @@ body: |
name: evex_scalar_to_evex_test
body: |
bb.0:
- ; CHECK: %xmm16 = VADDSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VADDSDZrr %xmm16, %xmm1
%xmm16 = VADDSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VADDSDZrr_Int %xmm16, %xmm1
%xmm16 = VADDSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VADDSSZrr %xmm16, %xmm1
%xmm16 = VADDSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VADDSSZrr_Int %xmm16, %xmm1
%xmm16 = VADDSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VDIVSDZrr %xmm16, %xmm1
%xmm16 = VDIVSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
%xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VDIVSSZrr %xmm16, %xmm1
%xmm16 = VDIVSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
%xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXCSDZrr %xmm16, %xmm1
%xmm16 = VMAXCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXCSSZrr %xmm16, %xmm1
%xmm16 = VMAXCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXSDZrr %xmm16, %xmm1
%xmm16 = VMAXSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
%xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMAXSSZrr %xmm16, %xmm1
%xmm16 = VMAXSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
%xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINCSDZrm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINCSDZrr %xmm16, %xmm1
%xmm16 = VMINCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINCSSZrm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINCSSZrr %xmm16, %xmm1
%xmm16 = VMINCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINSDZrr %xmm16, %xmm1
%xmm16 = VMINSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMINSDZrr_Int %xmm16, %xmm1
%xmm16 = VMINSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMINSSZrr %xmm16, %xmm1
%xmm16 = VMINSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMINSSZrr_Int %xmm16, %xmm1
%xmm16 = VMINSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMULSDZrr %xmm16, %xmm1
%xmm16 = VMULSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMULSDZrr_Int %xmm16, %xmm1
%xmm16 = VMULSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VMULSSZrr %xmm16, %xmm1
%xmm16 = VMULSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VMULSSZrr_Int %xmm16, %xmm1
%xmm16 = VMULSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSDZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBSDZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VSUBSDZrr %xmm16, %xmm1
%xmm16 = VSUBSDZrr %xmm16, %xmm1
; CHECK: %xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
%xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSSZrm %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBSSZrm %xmm16, %rip, 1, _, %rax, _
- ; CHECK: %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, _, %rax, _
- %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
+ ; CHECK: %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
+ %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
; CHECK: %xmm16 = VSUBSSZrr %xmm16, %xmm1
%xmm16 = VSUBSSZrr %xmm16, %xmm1
; CHECK: %xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
%xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, _, 0, _
- ; CHECK: %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
- %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, _, 0, _
+ ; CHECK: %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
+ %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
; CHECK: %xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
; CHECK: %xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
%xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: VPEXTRBZmr %rdi, 1, _, 0, _, %xmm16, 3
- VPEXTRBZmr %rdi, 1, _, 0, _, %xmm16, 3
+ ; CHECK: VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
+ VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
; CHECK: %eax = VPEXTRBZrr %xmm16, 1
%eax = VPEXTRBZrr %xmm16, 1
- ; CHECK: VPEXTRDZmr %rdi, 1, _, 0, _, %xmm16, 3
- VPEXTRDZmr %rdi, 1, _, 0, _, %xmm16, 3
+ ; CHECK: VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
+ VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
; CHECK: %eax = VPEXTRDZrr %xmm16, 1
%eax = VPEXTRDZrr %xmm16, 1
- ; CHECK: VPEXTRQZmr %rdi, 1, _, 0, _, %xmm16, 3
- VPEXTRQZmr %rdi, 1, _, 0, _, %xmm16, 3
+ ; CHECK: VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
+ VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
; CHECK: %rax = VPEXTRQZrr %xmm16, 1
%rax = VPEXTRQZrr %xmm16, 1
- ; CHECK: VPEXTRWZmr %rdi, 1, _, 0, _, %xmm16, 3
- VPEXTRWZmr %rdi, 1, _, 0, _, %xmm16, 3
+ ; CHECK: VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
+ VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
; CHECK: %eax = VPEXTRWZrr %xmm16, 1
%eax = VPEXTRWZrr %xmm16, 1
; CHECK: %eax = VPEXTRWZrr_REV %xmm16, 1
%eax = VPEXTRWZrr_REV %xmm16, 1
- ; CHECK: %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, _, 0, _, 3
- %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm16 = VPINSRBZrr %xmm16, %edi, 5
%xmm16 = VPINSRBZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, _, 0, _, 3
- %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm16 = VPINSRDZrr %xmm16, %edi, 5
%xmm16 = VPINSRDZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, _, 0, _, 3
- %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm16 = VPINSRQZrr %xmm16, %rdi, 5
%xmm16 = VPINSRQZrr %xmm16, %rdi, 5
- ; CHECK: %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, _, 0, _, 3
- %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, _, 0, _, 3
+ ; CHECK: %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
+ %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
; CHECK: %xmm16 = VPINSRWZrr %xmm16, %edi, 5
%xmm16 = VPINSRWZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VSQRTSDZm %xmm16, _, _, _, _, _
- %xmm16 = VSQRTSDZm %xmm16, _, _, _, _, _
- ; CHECK: %xmm16 = VSQRTSDZm_Int %xmm16, _, _, _, _, _
- %xmm16 = VSQRTSDZm_Int %xmm16, _, _, _, _, _
- ; CHECK: %xmm16 = VSQRTSDZr %xmm16, _
- %xmm16 = VSQRTSDZr %xmm16, _
- ; CHECK: %xmm16 = VSQRTSDZr_Int %xmm16, _
- %xmm16 = VSQRTSDZr_Int %xmm16, _
- ; CHECK: %xmm16 = VSQRTSSZm %xmm16, _, _, _, _, _
- %xmm16 = VSQRTSSZm %xmm16, _, _, _, _, _
- ; CHECK: %xmm16 = VSQRTSSZm_Int %xmm16, _, _, _, _, _
- %xmm16 = VSQRTSSZm_Int %xmm16, _, _, _, _, _
- ; CHECK: %xmm16 = VSQRTSSZr %xmm16, _
- %xmm16 = VSQRTSSZr %xmm16, _
- ; CHECK: %xmm16 = VSQRTSSZr_Int %xmm16, _
- %xmm16 = VSQRTSSZr_Int %xmm16, _
- ; CHECK: %rdi = VCVTSD2SI64Zrm %rdi, %xmm16, 1, _, 0
- %rdi = VCVTSD2SI64Zrm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSQRTSDZr %xmm16, %noreg
+ %xmm16 = VSQRTSDZr %xmm16, %noreg
+ ; CHECK: %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
+ %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VSQRTSSZr %xmm16, %noreg
+ %xmm16 = VSQRTSSZr %xmm16, %noreg
+ ; CHECK: %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
+ %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
+ ; CHECK: %rdi = VCVTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %rdi = VCVTSD2SI64Zrr %xmm16
%rdi = VCVTSD2SI64Zrr %xmm16
- ; CHECK: %edi = VCVTSD2SIZrm %rdi, %xmm16, 1, _, 0
- %edi = VCVTSD2SIZrm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %edi = VCVTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %edi = VCVTSD2SIZrr %xmm16
%edi = VCVTSD2SIZrr %xmm16
- ; CHECK: %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSD2SSZrr %xmm16, _
- %xmm16 = VCVTSD2SSZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSD2SSZrr_Int %xmm16, _
- %xmm16 = VCVTSD2SSZrr_Int %xmm16, _
- ; CHECK: %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI2SDZrr %xmm16, _
- %xmm16 = VCVTSI2SDZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSI2SDZrr_Int %xmm16, _
- %xmm16 = VCVTSI2SDZrr_Int %xmm16, _
- ; CHECK: %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI2SSZrr %xmm16, _
- %xmm16 = VCVTSI2SSZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSI2SSZrr_Int %xmm16, _
- %xmm16 = VCVTSI2SSZrr_Int %xmm16, _
- ; CHECK: %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI642SDZrr %xmm16, _
- %xmm16 = VCVTSI642SDZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSI642SDZrr_Int %xmm16, _
- %xmm16 = VCVTSI642SDZrr_Int %xmm16, _
- ; CHECK: %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSI642SSZrr %xmm16, _
- %xmm16 = VCVTSI642SSZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSI642SSZrr_Int %xmm16, _
- %xmm16 = VCVTSI642SSZrr_Int %xmm16, _
- ; CHECK: %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
- ; CHECK: %xmm16 = VCVTSS2SDZrr %xmm16, _
- %xmm16 = VCVTSS2SDZrr %xmm16, _
- ; CHECK: %xmm16 = VCVTSS2SDZrr_Int %xmm16, _
- %xmm16 = VCVTSS2SDZrr_Int %xmm16, _
- ; CHECK: %rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
- %rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
+ %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
+ %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
+ %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
+ %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
+ %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
+ ; CHECK: %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
+ %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
+ %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
+ ; CHECK: %rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %rdi = VCVTSS2SI64Zrr %xmm16
%rdi = VCVTSS2SI64Zrr %xmm16
- ; CHECK: %edi = VCVTSS2SIZrm %rdi, %xmm16, 1, _, 0
- %edi = VCVTSS2SIZrm %rdi, %xmm16, 1, _, 0
+ ; CHECK: %edi = VCVTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
; CHECK: %edi = VCVTSS2SIZrr %xmm16
%edi = VCVTSS2SIZrr %xmm16
- ; CHECK: %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, _, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, _, 0
- ; CHECK: %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, _, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, _, 0
+ ; CHECK: %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ ; CHECK: %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
; CHECK: %rdi = VCVTTSD2SI64Zrr %xmm16
%rdi = VCVTTSD2SI64Zrr %xmm16
; CHECK: %rdi = VCVTTSD2SI64Zrr_Int %xmm16
%rdi = VCVTTSD2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, _, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, _, 0
- ; CHECK: %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, _, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, _, 0
+ ; CHECK: %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ ; CHECK: %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
; CHECK: %edi = VCVTTSD2SIZrr %xmm16
%edi = VCVTTSD2SIZrr %xmm16
; CHECK: %edi = VCVTTSD2SIZrr_Int %xmm16
%edi = VCVTTSD2SIZrr_Int %xmm16
- ; CHECK: %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
- ; CHECK: %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, _, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, _, 0
+ ; CHECK: %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
+ ; CHECK: %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
+ %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
; CHECK: %rdi = VCVTTSS2SI64Zrr %xmm16
%rdi = VCVTTSS2SI64Zrr %xmm16
; CHECK: %rdi = VCVTTSS2SI64Zrr_Int %xmm16
%rdi = VCVTTSS2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, _, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, _, 0
- ; CHECK: %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, _, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, _, 0
+ ; CHECK: %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
+ ; CHECK: %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
+ %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
; CHECK: %edi = VCVTTSS2SIZrr %xmm16
%edi = VCVTTSS2SIZrr %xmm16
; CHECK: %edi = VCVTTSS2SIZrr_Int %xmm16
%edi = VCVTTSS2SIZrr_Int %xmm16
; CHECK: %xmm16 = VMOV64toSDZrr %rdi
%xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2SSZrm %rip, _, _, _, _
- %xmm16 = VMOVDI2SSZrm %rip, _, _, _, _
+ ; CHECK: %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VMOVDI2SSZrr %eax
%xmm16 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDZmr %rdi, %xmm16, _, _, _, _
- VMOVSDZmr %rdi, %xmm16, _, _, _, _
- ; CHECK: %xmm16 = VMOVSDZrm %rip, _, _, _, _
- %xmm16 = VMOVSDZrm %rip, _, _, _, _
- ; CHECK: %xmm16 = VMOVSDZrr %xmm16, _
- %xmm16 = VMOVSDZrr %xmm16, _
- ; CHECK: %xmm16 = VMOVSDZrr_REV %xmm16, _
- %xmm16 = VMOVSDZrr_REV %xmm16, _
+ ; CHECK: VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VMOVSDZrr %xmm16, %noreg
+ %xmm16 = VMOVSDZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
+ %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
; CHECK: %rax = VMOVSDto64Zrr %xmm16
%rax = VMOVSDto64Zrr %xmm16
- ; CHECK: VMOVSDto64Zmr %rdi, %xmm16, _, _, _, _
- VMOVSDto64Zmr %rdi, %xmm16, _, _, _, _
- ; CHECK: VMOVSSZmr %rdi, %xmm16, _, _, _, _
- VMOVSSZmr %rdi, %xmm16, _, _, _, _
- ; CHECK: %xmm16 = VMOVSSZrm %rip, _, _, _, _
- %xmm16 = VMOVSSZrm %rip, _, _, _, _
- ; CHECK: %xmm16 = VMOVSSZrr %xmm16, _
- %xmm16 = VMOVSSZrr %xmm16, _
- ; CHECK: %xmm16 = VMOVSSZrr_REV %xmm16, _
- %xmm16 = VMOVSSZrr_REV %xmm16, _
- ; CHECK: VMOVSS2DIZmr %rdi, %xmm16, _, _, _, _
- VMOVSS2DIZmr %rdi, %xmm16, _, _, _, _
+ ; CHECK: VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VMOVSSZrr %xmm16, %noreg
+ %xmm16 = VMOVSSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
+ %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
+ ; CHECK: VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
; CHECK: %eax = VMOVSS2DIZrr %xmm16
%eax = VMOVSS2DIZrr %xmm16
; CHECK: %xmm16 = VMOV64toPQIZrr %rdi
%xmm16 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm16 = VMOV64toPQIZrm %rdi, _, _, _, _
- %xmm16 = VMOV64toPQIZrm %rdi, _, _, _, _
+ ; CHECK: %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VMOV64toSDZrr %rdi
%xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2PDIZrm %rip, _, _, _, _
- %xmm16 = VMOVDI2PDIZrm %rip, _, _, _, _
+ ; CHECK: %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VMOVDI2PDIZrr %edi
%xmm16 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm16 = VMOVLHPSZrr %xmm16, _
- %xmm16 = VMOVLHPSZrr %xmm16, _
- ; CHECK: %xmm16 = VMOVHLPSZrr %xmm16, _
- %xmm16 = VMOVHLPSZrr %xmm16, _
- ; CHECK: VMOVPDI2DIZmr %rdi, %xmm16, _, _, _, _
- VMOVPDI2DIZmr %rdi, %xmm16, _, _, _, _
+ ; CHECK: %xmm16 = VMOVLHPSZrr %xmm16, %noreg
+ %xmm16 = VMOVLHPSZrr %xmm16, %noreg
+ ; CHECK: %xmm16 = VMOVHLPSZrr %xmm16, %noreg
+ %xmm16 = VMOVHLPSZrr %xmm16, %noreg
+ ; CHECK: VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
; CHECK: %edi = VMOVPDI2DIZrr %xmm16
%edi = VMOVPDI2DIZrr %xmm16
; CHECK: %xmm16 = VMOVPQI2QIZrr %xmm16
%xmm16 = VMOVPQI2QIZrr %xmm16
- ; CHECK: VMOVPQI2QIZmr %rdi, %xmm16, _, _, _, _
- VMOVPQI2QIZmr %rdi, %xmm16, _, _, _, _
+ ; CHECK: VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
; CHECK: %rdi = VMOVPQIto64Zrr %xmm16
%rdi = VMOVPQIto64Zrr %xmm16
- ; CHECK: VMOVPQIto64Zmr %rdi, %xmm16, _, _, _, _
- VMOVPQIto64Zmr %rdi, %xmm16, _, _, _, _
- ; CHECK: %xmm16 = VMOVQI2PQIZrm %rip, _, _, _, _
- %xmm16 = VMOVQI2PQIZrm %rip, _, _, _, _
+ ; CHECK: VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
+ ; CHECK: %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
+ %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
; CHECK: %xmm16 = VMOVZPQILo2PQIZrr %xmm16
%xmm16 = VMOVZPQILo2PQIZrr %xmm16
- ; CHECK: Int_VCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- Int_VCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
Int_VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: Int_VCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- Int_VCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
Int_VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: Int_VUCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- Int_VUCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
Int_VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: Int_VUCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- Int_VUCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: Int_VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ Int_VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: Int_VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
Int_VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- VCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- VCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- VUCOMISDZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
- VUCOMISSZrm %xmm16, %rdi, _, _, _, _, implicit-def %eflags
+ ; CHECK: VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
+ VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
; CHECK: VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
diff --git a/test/CodeGen/X86/expand-vr64-gr64-copy.mir b/test/CodeGen/X86/expand-vr64-gr64-copy.mir
index 3598c045ad5..ceb7d394af2 100644
--- a/test/CodeGen/X86/expand-vr64-gr64-copy.mir
+++ b/test/CodeGen/X86/expand-vr64-gr64-copy.mir
@@ -23,14 +23,14 @@ body: |
liveins: %xmm0
%xmm0 = PSHUFDri killed %xmm0, -24
- MOVPQI2QImr %rsp, 1, _, -8, _, killed %xmm0
- %mm0 = PSWAPDrm %rsp, 1, _, -8, _
+ MOVPQI2QImr %rsp, 1, %noreg, -8, %noreg, killed %xmm0
+ %mm0 = PSWAPDrm %rsp, 1, %noreg, -8, %noreg
; CHECK: %rax = MMX_MOVD64from64rr %mm0
; CHECK-NEXT: %mm0 = MMX_MOVD64to64rr %rax
%rax = COPY %mm0
%mm0 = COPY %rax
- MMX_MOVQ64mr %rsp, 1, _, -16, _, killed %mm0
- %xmm0 = MOVQI2PQIrm %rsp, 1, _, -16, _
+ MMX_MOVQ64mr %rsp, 1, %noreg, -16, %noreg, killed %mm0
+ %xmm0 = MOVQI2PQIrm %rsp, 1, %noreg, -16, %noreg
%xmm0 = PSHUFDri killed %xmm0, -44
RETQ %xmm0
...
diff --git a/test/CodeGen/X86/fcmove.ll b/test/CodeGen/X86/fcmove.ll
index 21cc683f734..35dbb68117b 100644
--- a/test/CodeGen/X86/fcmove.ll
+++ b/test/CodeGen/X86/fcmove.ll
@@ -12,4 +12,4 @@ define x86_fp80 @cmove_f(x86_fp80 %a, x86_fp80 %b, i32 %c) {
%add = fadd x86_fp80 %a, %b
%ret = select i1 %test, x86_fp80 %add, x86_fp80 %b
ret x86_fp80 %ret
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/X86/fixup-bw-inst.mir b/test/CodeGen/X86/fixup-bw-inst.mir
index 77d13bd0651..6638934f714 100644
--- a/test/CodeGen/X86/fixup-bw-inst.mir
+++ b/test/CodeGen/X86/fixup-bw-inst.mir
@@ -53,7 +53,7 @@ body: |
bb.0:
liveins: %rax
- %ax = MOV16rm killed %rax, 1, _, 0, _
+ %ax = MOV16rm killed %rax, 1, %noreg, 0, %noreg
; CHECK: %eax = MOVZX32rm16 killed %rax
RETQ %ax
@@ -138,8 +138,8 @@ body: |
bb.2.if.then:
liveins: %rdi
- %ax = MOV16rm killed %rdi, 1, _, 0, _, implicit-def %eax :: (load 2 from %ir.p)
- ; CHECK: %eax = MOVZX32rm16 killed %rdi, 1, _, 0, _, implicit-def %eax :: (load 2 from %ir.p)
+ %ax = MOV16rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
+ ; CHECK: %eax = MOVZX32rm16 killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
%ax = KILL %ax, implicit killed %eax
RETQ %ax
diff --git a/test/CodeGen/X86/i486-fence-loop.ll b/test/CodeGen/X86/i486-fence-loop.ll
index 9bf75b2ee7c..18556f261c3 100644
--- a/test/CodeGen/X86/i486-fence-loop.ll
+++ b/test/CodeGen/X86/i486-fence-loop.ll
@@ -23,4 +23,4 @@ while.body:
if.then:
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir
index df89e4f6bfa..889c4834de2 100644
--- a/test/CodeGen/X86/implicit-null-checks.mir
+++ b/test/CodeGen/X86/implicit-null-checks.mir
@@ -391,7 +391,7 @@ liveins:
- { reg: '%esi' }
# CHECK: bb.0.entry:
# CHECK: %eax = MOV32ri 2200000
-# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %eax, %rdi, 1, _, 0, _, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
# CHECK-NEXT: JMP_1 %bb.1.not_null
body: |
@@ -405,7 +405,7 @@ body: |
liveins: %esi, %rdi
%eax = MOV32ri 2200000
- %eax = AND32rm killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+ %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
JE_1 %bb.4.ret_100, implicit %eflags
@@ -431,7 +431,7 @@ liveins:
- { reg: '%esi' }
- { reg: '%rdx' }
# CHECK: bb.0.entry:
-# CHECK: %eax = MOV32rm killed %rdx, 1, _, 0, _ :: (volatile load 4 from %ir.ptr)
+# CHECK: %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
# CHECK-NEXT: TEST64rr %rdi, %rdi, implicit-def %eflags
# CHECK-NEXT: JE_1 %bb.3.is_null, implicit %eflags
@@ -439,7 +439,7 @@ body: |
bb.0.entry:
liveins: %esi, %rdi, %rdx
- %eax = MOV32rm killed %rdx, 1, _, 0, _ :: (volatile load 4 from %ir.ptr)
+ %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
TEST64rr %rdi, %rdi, implicit-def %eflags
JE_1 %bb.3.is_null, implicit %eflags
@@ -447,7 +447,7 @@ body: |
liveins: %esi, %rdi
%eax = MOV32ri 2200000
- %eax = AND32rm killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+ %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
JE_1 %bb.4.ret_100, implicit %eflags
@@ -489,7 +489,7 @@ body: |
%eax = MOV32ri 2200000
%eax = ADD32ri killed %eax, 100, implicit-def dead %eflags
- %eax = AND32rm killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+ %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP32rr killed %eax, killed %esi, implicit-def %eflags
JE_1 %bb.4.ret_100, implicit %eflags
@@ -529,7 +529,7 @@ body: |
liveins: %rsi, %rdi
%rdi = MOV64ri 5000
- %rdi = AND64rm killed %rdi, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+ %rdi = AND64rm killed %rdi, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
JE_1 %bb.4.ret_100, implicit %eflags
@@ -556,7 +556,7 @@ liveins:
- { reg: '%rsi' }
# CHECK: bb.0.entry:
# CHECK: %rbx = MOV64rr %rdx
-# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %rbx, %rdi, 1, _, 0, _, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
body: |
bb.0.entry:
@@ -569,7 +569,7 @@ body: |
liveins: %rsi, %rdi, %rdx
%rbx = MOV64rr %rdx
- %rbx = AND64rm killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+ %rbx = AND64rm killed %rbx, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
%rdx = MOV64ri 0
CMP64rr killed %rbx, killed %rsi, implicit-def %eflags
JE_1 %bb.4.ret_100, implicit %eflags
@@ -617,7 +617,7 @@ body: |
liveins: %rbx
CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
- %eax = MOV32rm killed %rbx, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ %eax = MOV32rm killed %rbx, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
%rbx = POP64r implicit-def %rsp, implicit %rsp
RETQ %eax
@@ -653,10 +653,10 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- %rcx = MOV64rm killed %rsi, 1, _, 0, _ :: (load 8 from %ir.ptr2)
+ %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr2)
%esi = MOV32ri 3076
- %eax = BEXTR32rm killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags :: (load 4 from %ir.ptr)
- %eax = ADD32rm killed %eax, killed %rcx, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.val)
+ %eax = BEXTR32rm killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags :: (load 4 from %ir.ptr)
+ %eax = ADD32rm killed %eax, killed %rcx, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.val)
RETQ %eax
bb.2.is_null:
@@ -668,7 +668,7 @@ body: |
name: use_alternate_load_op
# CHECK-LABEL: name: use_alternate_load_op
# CHECK: bb.0.entry:
-# CHECK: %rax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 0, _
+# CHECK: %rax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -687,9 +687,9 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- %rcx = MOV64rm killed %rsi, 1, _, 0, _
- %rcx = AND64rm killed %rcx, %rdi, 1, _, 0, _, implicit-def dead %eflags
- %rax = MOV64rm killed %rdi, 1, _, 0, _
+ %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg
+ %rcx = AND64rm killed %rcx, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags
+ %rax = MOV64rm killed %rdi, 1, %noreg, 0, %noreg
RETQ %eax
bb.2.is_null:
@@ -701,7 +701,7 @@ body: |
name: imp_null_check_gep_load_with_use_dep
# CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
# CHECK-NEXT: JMP_1 %bb.1.not_null
alignment: 4
tracksRegLiveness: true
@@ -719,8 +719,8 @@ body: |
liveins: %rdi, %rsi
%rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
- %eax = MOV32rm killed %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
- %eax = LEA64_32r killed %rax, 1, killed %rsi, 4, _
+ %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
+ %eax = LEA64_32r killed %rax, 1, killed %rsi, 4, %noreg
RETQ %eax
bb.1.is_null:
@@ -733,7 +733,7 @@ name: imp_null_check_load_with_base_sep
# CHECK-LABEL: name: imp_null_check_load_with_base_sep
# CHECK: bb.0.entry:
# CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
-# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %esi, %rdi, 1, _, 0, _, implicit-def %eflags
+# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
# CHECK-NEXT: JMP_1 %bb.1.not_null
alignment: 4
tracksRegLiveness: true
@@ -751,7 +751,7 @@ body: |
liveins: %rdi, %rsi
%rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
- %esi = AND32rm killed %esi, %rdi, 1, _, 0, _, implicit-def dead %eflags
+ %esi = AND32rm killed %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags
%eax = MOV32rr %esi
RETQ %eax
@@ -764,7 +764,7 @@ body: |
name: inc_store
# CHECK-LABEL: name: inc_store
# CHECK: bb.0.entry:
-# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 0, _, %rsi
+# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -783,7 +783,7 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV64mr killed %rdi, 1, _, 0, _, killed %rsi
+ MOV64mr killed %rdi, 1, %noreg, 0, %noreg, killed %rsi
RETQ
bb.2.is_null:
@@ -794,7 +794,7 @@ body: |
name: inc_store_plus_offset
# CHECK-LABEL: inc_store_plus_offset
# CHECK: bb.0.entry:
-# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 16, _, %rsi
+# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -813,7 +813,7 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV64mr killed %rdi, 1, _, 16, _, killed %rsi
+ MOV64mr killed %rdi, 1, %noreg, 16, %noreg, killed %rsi
RETQ
bb.2.is_null:
@@ -825,7 +825,7 @@ name: inc_store_with_dep
# CHECK-LABEL: inc_store_with_dep
# CHECK: bb.0.entry:
# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
-# CHECK-NEXT: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 16, _, %esi
+# CHECK-NEXT: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -845,7 +845,7 @@ body: |
liveins: %rdi, %rsi
%esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
RETQ
bb.2.is_null:
@@ -876,7 +876,7 @@ body: |
liveins: %rdi, %rsi
%esi = ADD32rr %esi, %esi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, _, 0, _, %esi
+ MOV32mr killed %rdi, 1, %noreg, 0, %noreg, %esi
%eax = MOV32rr killed %esi
RETQ %eax
@@ -910,7 +910,7 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV32mr killed %rdi, 1, _, 0, _, killed %esi :: (volatile store 4 into %ir.ptr)
+ MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi :: (volatile store 4 into %ir.ptr)
RETQ
bb.2.is_null:
@@ -942,7 +942,7 @@ body: |
%esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
%esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
RETQ
bb.2.is_null:
@@ -973,7 +973,7 @@ body: |
liveins: %rdi, %rsi
%rdi = ADD64rr killed %rdi, killed %rdi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
RETQ
bb.2.is_null:
@@ -984,7 +984,7 @@ body: |
name: inc_store_with_reused_base
# CHECK-LABEL: inc_store_with_reused_base
# CHECK: bb.0.entry:
-# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 16, _, %esi
+# CHECK: %noreg = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -1004,7 +1004,7 @@ body: |
liveins: %rdi, %rsi
%rax = MOV64rr %rdi
- MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
RETQ %eax
bb.2.is_null:
@@ -1043,7 +1043,7 @@ body: |
liveins: %rbx
CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
- MOV32mi %rbx, 1, _, 0, _, 20
+ MOV32mi %rbx, 1, %noreg, 0, %noreg, 20
%rax = MOV64rr killed %rbx
%rbx = POP64r implicit-def %rsp, implicit %rsp
RETQ %eax
@@ -1079,7 +1079,7 @@ body: |
%eax = MOV32rr %esi
%esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, _, 0, _, killed %esi
+ MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi
RETQ %eax
bb.2.is_null:
@@ -1110,8 +1110,8 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV32mi killed %rsi, 1, _, 0, _, 2
- %eax = MOV32rm killed %rdi, 1, _, 0, _
+ MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 2
+ %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg
RETQ %eax
bb.2.is_null:
@@ -1142,8 +1142,8 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- %eax = MOV32rm killed %rsi, 1, _, 0, _
- MOV32mi killed %rdi, 1, _, 0, _, 2
+ %eax = MOV32rm killed %rsi, 1, %noreg, 0, %noreg
+ MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2
RETQ %eax
bb.2.is_null:
@@ -1174,8 +1174,8 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV32mi killed %rsi, 1, _, 0, _, 3
- MOV32mi killed %rdi, 1, _, 0, _, 2
+ MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3
+ MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2
RETQ
bb.2.is_null:
@@ -1186,7 +1186,7 @@ body: |
name: inc_store_with_load_and_store
# CHECK-LABEL: inc_store_with_load_and_store
# CHECK: bb.0.entry:
-# CHECK: _ = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 0, _, %esi, implicit-def %eflags
+# CHECK: %noreg = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -1206,7 +1206,7 @@ body: |
liveins: %rdi, %rsi
%esi = ADD32rr %esi, %esi, implicit-def dead %eflags
- ADD32mr killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags
+ ADD32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags
RETQ
bb.2.is_null:
@@ -1217,7 +1217,7 @@ body: |
name: inc_store_and_load_no_alias
# CHECK-LABEL: inc_store_and_load_no_alias
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
# CHECK-NEXT: JMP_1 %bb.1.not_null
# CHECK: bb.1.not_null
@@ -1236,8 +1236,8 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV32mi killed %rsi, 1, _, 0, _, 3 :: (store 4 into %ir.ptr2)
- %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2)
+ %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
RETQ %eax
bb.2.is_null:
@@ -1268,8 +1268,8 @@ body: |
bb.1.not_null:
liveins: %rdi, %rsi
- MOV32mi killed %rsi, 1, _, 0, _, 3 :: (store 4 into %ir.ptr2)
- %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2)
+ %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
RETQ %eax
bb.2.is_null:
diff --git a/test/CodeGen/X86/implicit-use-spill.mir b/test/CodeGen/X86/implicit-use-spill.mir
index 9d8b04564e5..25f245e9c4f 100644
--- a/test/CodeGen/X86/implicit-use-spill.mir
+++ b/test/CodeGen/X86/implicit-use-spill.mir
@@ -11,10 +11,10 @@ body: |
bb.0:
; CHECK: NOOP implicit-def [[VAL:%[0-9]+]]
; VAL should be spilled before csr_noregs, i.e., before we clobber all the registers
- ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, _, 0, _, [[VAL]]
+ ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, %noreg, 0, %noreg, [[VAL]]
; CHECK-NEXT: NOOP csr_noregs
; We need to reload before the (implicit) use.
- ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, _, 0, _
+ ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, %noreg, 0, %noreg
; CHECK-NEXT: NOOP implicit [[RELOADED_VAL]]
NOOP implicit-def %0
NOOP csr_noregs
diff --git a/test/CodeGen/X86/ipra-inline-asm.ll b/test/CodeGen/X86/ipra-inline-asm.ll
index e70b149e19e..4b56c3a2fd6 100644
--- a/test/CodeGen/X86/ipra-inline-asm.ll
+++ b/test/CodeGen/X86/ipra-inline-asm.ll
@@ -11,7 +11,7 @@ define void @bar() #0 {
}
; Verifies that inline assembly is correctly handled by giving a list of clobbered registers
-; CHECK: foo Clobbered Registers: AH AL AX CH CL CX DI DIL EAX ECX EDI RAX RCX RDI
+; CHECK: foo Clobbered Registers: %ah %al %ax %ch %cl %cx %di %dil %eax %ecx %edi %rax %rcx %rdi
define void @foo() #0 {
call void asm sideeffect "", "~{eax},~{ecx},~{edi}"() #0
ret void
diff --git a/test/CodeGen/X86/ipra-reg-alias.ll b/test/CodeGen/X86/ipra-reg-alias.ll
index 36b768e4c4f..c5c36075652 100644
--- a/test/CodeGen/X86/ipra-reg-alias.ll
+++ b/test/CodeGen/X86/ipra-reg-alias.ll
@@ -6,7 +6,7 @@ define i8 @main(i8 %X) {
%inc2 = mul i8 %inc, 5
; Here only CL is clobbered so CH should not be clobbered, but CX, ECX and RCX
; should be clobbered.
-; CHECK: main Clobbered Registers: AH AL AX CL CX EAX ECX EFLAGS RAX RCX
+; CHECK: main Clobbered Registers: %ah %al %ax %cl %cx %eax %ecx %eflags %rax %rcx
ret i8 %inc2
}
diff --git a/test/CodeGen/X86/ipra-reg-usage.ll b/test/CodeGen/X86/ipra-reg-usage.ll
index 6a84ab8ab75..50c066de965 100644
--- a/test/CodeGen/X86/ipra-reg-usage.ll
+++ b/test/CodeGen/X86/ipra-reg-usage.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: CS DS EFLAGS EIP EIZ ES FPSW FS GS IP RIP RIZ SS SSP BND0 BND1 BND2 BND3 CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 K0 K1 K2 K3 K4 K5 K6 K7 MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 R11 ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 R11B R11D R11W
+; CHECK: foo Clobbered Registers: %cs %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w
call void @bar1()
call void @bar2()
ret void
diff --git a/test/CodeGen/X86/lea-opt-with-debug.mir b/test/CodeGen/X86/lea-opt-with-debug.mir
index dfa9eed479a..61e406985d7 100644
--- a/test/CodeGen/X86/lea-opt-with-debug.mir
+++ b/test/CodeGen/X86/lea-opt-with-debug.mir
@@ -95,28 +95,28 @@ body: |
bb.0 (%ir-block.0):
successors: %bb.1(0x80000000)
- ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, _, debug-location !13
- ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, _, debug-location !13
- ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, _, debug-location !14
- ; CHECK: DBG_VALUE debug-use %4, debug-use _, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
-
- %1 = MOV64rm %rip, 1, _, @c, _, debug-location !13 :: (dereferenceable load 8 from @c)
- %2 = MOVSX64rm32 %rip, 1, _, @a, _, debug-location !13 :: (dereferenceable load 4 from @a)
- %3 = LEA64r %2, 2, %2, 0, _, debug-location !13
- %4 = LEA64r %1, 4, %3, 0, _, debug-location !13
+ ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
+ ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
+ ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
+ ; CHECK: DBG_VALUE debug-use %4, debug-use %noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
+
+ %1 = MOV64rm %rip, 1, %noreg, @c, %noreg, debug-location !13 :: (dereferenceable load 8 from @c)
+ %2 = MOVSX64rm32 %rip, 1, %noreg, @a, %noreg, debug-location !13 :: (dereferenceable load 4 from @a)
+ %3 = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
+ %4 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
%5 = COPY %4.sub_32bit, debug-location !13
- MOV32mr %rip, 1, _, @d, _, killed %5, debug-location !13 :: (store 4 into @d)
- %0 = LEA64r %1, 4, %3, 8, _, debug-location !14
- DBG_VALUE debug-use %0, debug-use _, !11, !DIExpression(), debug-location !15
+ MOV32mr %rip, 1, %noreg, @d, %noreg, killed %5, debug-location !13 :: (store 4 into @d)
+ %0 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
+ DBG_VALUE debug-use %0, debug-use %noreg, !11, !DIExpression(), debug-location !15
; CHECK-LABEL: bb.1 (%ir-block.8):
- ; CHECK: %6:gr32 = MOV32rm %4, 1, _, 8, _, debug-location !17 :: (load 4 from %ir.7)
+ ; CHECK: %6:gr32 = MOV32rm %4, 1, %noreg, 8, %noreg, debug-location !17 :: (load 4 from %ir.7)
bb.1 (%ir-block.8):
successors: %bb.1(0x80000000)
- %6 = MOV32rm %0, 1, _, 0, _, debug-location !17 :: (load 4 from %ir.7)
- MOV32mr %rip, 1, _, @d, _, killed %6, debug-location !17 :: (store 4 into @d)
+ %6 = MOV32rm %0, 1, %noreg, 0, %noreg, debug-location !17 :: (load 4 from %ir.7)
+ MOV32mr %rip, 1, %noreg, @d, %noreg, killed %6, debug-location !17 :: (store 4 into @d)
JMP_1 %bb.1, debug-location !18
...
diff --git a/test/CodeGen/X86/leaFixup32.mir b/test/CodeGen/X86/leaFixup32.mir
index e3986e47df4..d8e52802f56 100644
--- a/test/CodeGen/X86/leaFixup32.mir
+++ b/test/CodeGen/X86/leaFixup32.mir
@@ -107,7 +107,7 @@ body: |
; CHECK: %eax = ADD32rr %eax, killed %ebp
; CHECK: %eax = ADD32ri8 %eax, -5
- %eax = LEA32r killed %eax, 1, killed %ebp, -5, _
+ %eax = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
RETQ %eax
...
@@ -142,7 +142,7 @@ body: |
; CHECK: %ebp = ADD32rr %ebp, killed %eax
; CHECK: %ebp = ADD32ri8 %ebp, -5
- %ebp = LEA32r killed %ebp, 1, killed %eax, -5, _
+ %ebp = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
RETQ %ebp
...
@@ -176,7 +176,7 @@ body: |
liveins: %eax, %ebp
; CHECK: %ebp = ADD32rr %ebp, killed %eax
- %ebp = LEA32r killed %ebp, 1, killed %eax, 0, _
+ %ebp = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
RETQ %ebp
...
@@ -212,7 +212,7 @@ body: |
; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA32r killed %eax, 1, killed %ebp, -5, _
+ %ebx = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
RETQ %ebx
...
@@ -245,10 +245,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA32r killed %ebp, 1, killed %eax, -5, _
+ %ebx = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
RETQ %ebx
...
@@ -281,9 +281,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
- %ebx = LEA32r killed %ebp, 1, killed %eax, 0, _
+ %ebx = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
RETQ %ebx
...
@@ -318,7 +318,7 @@ body: |
; CHECK: %eax = ADD32rr %eax, killed %ebp
; CHECK: %eax = ADD32ri %eax, 129
- %eax = LEA32r killed %eax, 1, killed %ebp, 129, _
+ %eax = LEA32r killed %eax, 1, killed %ebp, 129, %noreg
RETQ %eax
...
@@ -354,7 +354,7 @@ body: |
; CHECK: %ebx = MOV32rr %ebp
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 1, %ebp, 0, _
+ %ebx = LEA32r %ebp, 1, %ebp, 0, %noreg
RETQ %ebx
...
@@ -386,10 +386,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r _, 1, %ebp, 5, _
+ ; CHECK: %ebx = LEA32r %noreg, 1, %ebp, 5, %noreg
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 1, %ebp, 5, _
+ %ebx = LEA32r %ebp, 1, %ebp, 5, %noreg
RETQ %ebx
...
@@ -421,10 +421,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r _, 4, %ebp, 5, _
+ ; CHECK: %ebx = LEA32r %noreg, 4, %ebp, 5, %noreg
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 4, %ebp, 5, _
+ %ebx = LEA32r %ebp, 4, %ebp, 5, %noreg
RETQ %ebx
...
@@ -456,9 +456,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
- %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
RETQ %ebp
...
@@ -490,17 +490,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
- ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
+ ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, %noreg
; CHECK: %ebp = ADD32ri8 %ebp, 5
CMP32rr %eax, killed %ebx, implicit-def %eflags
- %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %eax, %ebp, %ebx
- %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, _
+ %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, %noreg
RETQ %ebp
...
diff --git a/test/CodeGen/X86/leaFixup64.mir b/test/CodeGen/X86/leaFixup64.mir
index b35dee181a4..ad86d4ba27f 100644
--- a/test/CodeGen/X86/leaFixup64.mir
+++ b/test/CodeGen/X86/leaFixup64.mir
@@ -180,7 +180,7 @@ body: |
; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %eax = ADD32ri8 %eax, -5
- %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %eax
...
@@ -215,7 +215,7 @@ body: |
; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %ebp = ADD32ri8 %ebp, -5
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebp
...
@@ -249,7 +249,7 @@ body: |
liveins: %rax, %rbp
; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebp
...
@@ -284,7 +284,7 @@ body: |
; CHECK: %rax = ADD64rr %rax, killed %rbp
; CHECK: %rax = ADD64ri8 %rax, -5
- %rax = LEA64r killed %rax, 1, killed %rbp, -5, _
+ %rax = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %eax
...
@@ -319,7 +319,7 @@ body: |
; CHECK: %rbp = ADD64rr %rbp, killed %rax
; CHECK: %rbp = ADD64ri8 %rbp, -5
- %rbp = LEA64r killed %rbp, 1, killed %rax, -5, _
+ %rbp = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebp
...
@@ -353,7 +353,7 @@ body: |
liveins: %rax, %rbp
; CHECK: %rbp = ADD64rr %rbp, killed %rax
- %rbp = LEA64r killed %rbp, 1, killed %rax, 0, _
+ %rbp = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebp
...
@@ -386,10 +386,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %ebx
...
@@ -422,10 +422,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebx
...
@@ -458,9 +458,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebx
...
@@ -493,10 +493,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %rbx = ADD64ri8 %rbx, -5
- %rbx = LEA64r killed %rax, 1, killed %rbp, -5, _
+ %rbx = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %ebx
...
@@ -529,10 +529,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %rbx = ADD64ri8 %rbx, -5
- %rbx = LEA64r killed %rbp, 1, killed %rax, -5, _
+ %rbx = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebx
...
@@ -565,9 +565,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
- %rbx = LEA64r killed %rbp, 1, killed %rax, 0, _
+ %rbx = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebx
...
@@ -599,11 +599,11 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rdi, %rbp
- ; CHECK: %r12 = LEA64r _, 2, killed %r13, 5, _
+ ; CHECK: %r12 = LEA64r %noreg, 2, killed %r13, 5, %noreg
; CHECK: %r12 = ADD64rr %r12, killed %rbp
%rbp = KILL %rbp, implicit-def %rbp
%r13 = KILL %rdi, implicit-def %r13
- %r12 = LEA64r killed %rbp, 2, killed %r13, 5, _
+ %r12 = LEA64r killed %rbp, 2, killed %r13, 5, %noreg
RETQ %r12
...
@@ -638,7 +638,7 @@ body: |
; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %eax = ADD32ri %eax, 129
- %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, _
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, %noreg
RETQ %eax
...
@@ -670,9 +670,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
RETQ %ebx
...
@@ -704,9 +704,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
RETQ %ebx
...
@@ -738,9 +738,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
- %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
RETQ %ebx
...
@@ -775,7 +775,7 @@ body: |
; CHECK: %rax = ADD64rr %rax, killed %rbp
; CHECK: %rax = ADD64ri32 %rax, 129
- %rax = LEA64r killed %rax, 1, killed %rbp, 129, _
+ %rax = LEA64r killed %rax, 1, killed %rbp, 129, %noreg
RETQ %eax
...
@@ -810,7 +810,7 @@ body: |
; CHECK: %rbx = MOV64rr %rbp
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 1, %rbp, 0, _
+ %rbx = LEA64r %rbp, 1, %rbp, 0, %noreg
RETQ %ebx
...
@@ -842,10 +842,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r _, 1, %rbp, 5, _
+ ; CHECK: %rbx = LEA64r %noreg, 1, %rbp, 5, %noreg
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 1, %rbp, 5, _
+ %rbx = LEA64r %rbp, 1, %rbp, 5, %noreg
RETQ %ebx
...
@@ -877,10 +877,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r _, 4, %rbp, 5, _
+ ; CHECK: %rbx = LEA64r %noreg, 4, %rbp, 5, %noreg
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 4, %rbp, 5, _
+ %rbx = LEA64r %rbp, 4, %rbp, 5, %noreg
RETQ %ebx
...
@@ -912,9 +912,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
- %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
RETQ %ebp
...
@@ -946,17 +946,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
- ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
+ ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, %noreg
; CHECK: %rbp = ADD64ri8 %rbp, 5
CMP64rr %rax, killed %rbx, implicit-def %eflags
- %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %rax, %rbp, %rbx
- %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, _
+ %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, %noreg
RETQ %ebp
...
@@ -988,9 +988,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
- %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
RETQ %ebp
...
@@ -1022,17 +1022,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
- ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
+ ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, %noreg
; CHECK: %ebp = ADD32ri8 %ebp, 5
CMP64rr %rax, killed %rbx, implicit-def %eflags
- %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %rax, %rbp, %rbx
- %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, _
+ %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, %noreg
RETQ %ebp
...
diff --git a/test/CodeGen/X86/movtopush.mir b/test/CodeGen/X86/movtopush.mir
index 95ba9490c31..4c1dfc57627 100644
--- a/test/CodeGen/X86/movtopush.mir
+++ b/test/CodeGen/X86/movtopush.mir
@@ -41,10 +41,10 @@
# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
-# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
-# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
-# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, _, 0, _
-# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, _, 0, _
+# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
+# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
+# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
+# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
# CHECK-NEXT: PUSH32r %4, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32r %5, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32i8 6, implicit-def %esp, implicit %esp
@@ -101,23 +101,23 @@ body: |
bb.0.entry:
ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
%0 = COPY %esp
- MOV32mi %0, 1, _, 12, _, 4 :: (store 4 into stack + 12)
- MOV32mi %0, 1, _, 8, _, 3 :: (store 4 into stack + 8)
- MOV32mi %0, 1, _, 4, _, 2 :: (store 4 into stack + 4)
- MOV32mi %0, 1, _, 0, _, 1 :: (store 4 into stack)
+ MOV32mi %0, 1, %noreg, 12, %noreg, 4 :: (store 4 into stack + 12)
+ MOV32mi %0, 1, %noreg, 8, %noreg, 3 :: (store 4 into stack + 8)
+ MOV32mi %0, 1, %noreg, 4, %noreg, 2 :: (store 4 into stack + 4)
+ MOV32mi %0, 1, %noreg, 0, %noreg, 1 :: (store 4 into stack)
CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
- %1 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
- %2 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
+ %1 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
+ %2 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
%3 = COPY %esp
- MOV32mr %3, 1, _, 4, _, killed %2 :: (store 4)
- MOV32mr %3, 1, _, 0, _, killed %1 :: (store 4)
- %4 = LEA32r %stack.0.p, 1, _, 0, _
- MOV32mr %3, 1, _, 16, _, killed %4 :: (store 4 into stack + 16)
- %5 = LEA32r %stack.1.q, 1, _, 0, _
- MOV32mr %3, 1, _, 12, _, killed %5 :: (store 4 into stack + 12)
- MOV32mi %3, 1, _, 8, _, 6 :: (store 4 into stack + 8)
+ MOV32mr %3, 1, %noreg, 4, %noreg, killed %2 :: (store 4)
+ MOV32mr %3, 1, %noreg, 0, %noreg, killed %1 :: (store 4)
+ %4 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
+ MOV32mr %3, 1, %noreg, 16, %noreg, killed %4 :: (store 4 into stack + 16)
+ %5 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
+ MOV32mr %3, 1, %noreg, 12, %noreg, killed %5 :: (store 4 into stack + 12)
+ MOV32mi %3, 1, %noreg, 8, %noreg, 6 :: (store 4 into stack + 8)
CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp,
ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
RET 0
diff --git a/test/CodeGen/X86/non-value-mem-operand.mir b/test/CodeGen/X86/non-value-mem-operand.mir
index 3e969a56170..b537a637d8d 100644
--- a/test/CodeGen/X86/non-value-mem-operand.mir
+++ b/test/CodeGen/X86/non-value-mem-operand.mir
@@ -175,14 +175,14 @@ body: |
successors: %bb.4.bb7(0x80000000)
liveins: %rax
- MOV64mr %rsp, 1, _, 32, _, %rax :: (store 8 into %stack.5)
+ MOV64mr %rsp, 1, %noreg, 32, %noreg, %rax :: (store 8 into %stack.5)
%r12 = MOV64rr killed %rax
%r12 = ADD64ri8 killed %r12, 16, implicit-def dead %eflags
%xmm0 = XORPSrr undef %xmm0, undef %xmm0
%esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
%rax = MOV64ri %const.0
- %xmm1 = MOVSDrm killed %rax, 1, _, 0, _ :: (load 8 from constant-pool)
- MOVSDmr %rsp, 1, _, 40, _, killed %xmm1 :: (store 8 into %stack.4)
+ %xmm1 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
+ MOVSDmr %rsp, 1, %noreg, 40, %noreg, killed %xmm1 :: (store 8 into %stack.4)
%eax = IMPLICIT_DEF
%ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags
@@ -200,11 +200,11 @@ body: |
successors: %bb.6.bb26(0x80000000)
liveins: %ebp, %rbx, %r14, %xmm0
- MOV32mr %rsp, 1, _, 24, _, %ebx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, _, 16, _, %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, _, 8, _, killed %xmm0 :: (store 8 into %stack.2)
- %rax = MOV64rm %rsp, 1, _, 32, _ :: (load 8 from %stack.5)
- MOV64mr %rsp, 1, _, 48, _, killed %rax :: (store 8 into %stack.3)
+ MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx :: (store 4 into %stack.0, align 8)
+ MOV32mr %rsp, 1, %noreg, 16, %noreg, %ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
+ %rax = MOV64rm %rsp, 1, %noreg, 32, %noreg :: (load 8 from %stack.5)
+ MOV64mr %rsp, 1, %noreg, 48, %noreg, killed %rax :: (store 8 into %stack.3)
%rax = MOV64ri @wibble
STATEPOINT 2882400000, 0, 0, killed %rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, %rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3)
%esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
@@ -215,16 +215,16 @@ body: |
liveins: %ebp, %esi, %rbx, %r12, %r14
%rax = MOV64ri @global.1
- %rax = MOV64rm killed %rax, 1, _, 0, _ :: (dereferenceable load 8 from @global.1)
+ %rax = MOV64rm killed %rax, 1, %noreg, 0, %noreg :: (dereferenceable load 8 from @global.1)
TEST64rr %rax, %rax, implicit-def %eflags
%rax = CMOVE64rr undef %rax, killed %rax, implicit killed %eflags
- %ecx = MOV32rm undef %rax, 1, _, 0, _ :: (load 4 from `i32* undef`)
- %rdx = MOV64rm %r12, 8, %r14, 0, _ :: (load 8 from %ir.tmp3)
- %r15 = LEA64r %rdx, 1, _, 1, _
- MOV64mr %r12, 8, %r14, 0, _, %r15 :: (store 8 into %ir.tmp3)
+ %ecx = MOV32rm undef %rax, 1, %noreg, 0, %noreg :: (load 4 from `i32* undef`)
+ %rdx = MOV64rm %r12, 8, %r14, 0, %noreg :: (load 8 from %ir.tmp3)
+ %r15 = LEA64r %rdx, 1, %noreg, 1, %noreg
+ MOV64mr %r12, 8, %r14, 0, %noreg, %r15 :: (store 8 into %ir.tmp3)
%ecx = SUB32rr killed %ecx, %edx, implicit-def dead %eflags, implicit killed %rdx
- MOV32mr undef %rax, 1, _, 0, _, killed %ecx :: (store 4 into `i32* undef`)
- %r13 = MOV64rm killed %rax, 1, _, 768, _ :: (load 8 from %ir.tmp33)
+ MOV32mr undef %rax, 1, %noreg, 0, %noreg, killed %ecx :: (store 4 into `i32* undef`)
+ %r13 = MOV64rm killed %rax, 1, %noreg, 768, %noreg :: (load 8 from %ir.tmp33)
TEST8rr %sil, %sil, implicit-def %eflags
%rax = IMPLICIT_DEF
JNE_1 %bb.8.bb37, implicit %eflags
@@ -242,7 +242,7 @@ body: |
successors: %bb.9.bb37(0x40000000), %bb.10.bb37(0x40000000)
liveins: %ebp, %esi, %rax, %rbx, %r12, %r13, %r14, %r15
- %rcx = MOV64rm killed %rax, 1, _, 760, _ :: (load 8 from %ir.tmp40)
+ %rcx = MOV64rm killed %rax, 1, %noreg, 760, %noreg :: (load 8 from %ir.tmp40)
CMP64rr %r13, %rcx, implicit-def %eflags
JL_1 %bb.10.bb37, implicit %eflags
@@ -258,12 +258,12 @@ body: |
%cl = KILL %cl, implicit killed %rcx
%r15 = SAR64rCL killed %r15, implicit-def dead %eflags, implicit %cl
- MOV64mr %r12, 8, killed %r14, 0, _, killed %r15 :: (store 8 into %ir.tmp7)
- MOV64mi32 undef %rax, 1, _, 0, _, 0 :: (store 8 into `i64* undef`)
- %eax = LEA64_32r %rbx, 1, _, 1, _
+ MOV64mr %r12, 8, killed %r14, 0, %noreg, killed %r15 :: (store 8 into %ir.tmp7)
+ MOV64mi32 undef %rax, 1, %noreg, 0, %noreg, 0 :: (store 8 into `i64* undef`)
+ %eax = LEA64_32r %rbx, 1, %noreg, 1, %noreg
%ecx = MOV32ri 6
CMP32ri %eax, 15141, implicit-def %eflags
- %xmm0 = MOVSDrm %rsp, 1, _, 40, _ :: (load 8 from %stack.4)
+ %xmm0 = MOVSDrm %rsp, 1, %noreg, 40, %noreg :: (load 8 from %stack.4)
JL_1 %bb.4.bb7, implicit %eflags
bb.11.bb51.loopexit:
@@ -273,14 +273,14 @@ body: |
%ebp = INC32r killed %ebp, implicit-def dead %eflags
%ebx = INC32r %ebx, implicit-def dead %eflags, implicit killed %rbx, implicit-def %rbx
%rax = MOV64ri %const.0
- %xmm0 = MOVSDrm killed %rax, 1, _, 0, _ :: (load 8 from constant-pool)
+ %xmm0 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
bb.12.bb51:
liveins: %ebp, %rbx, %xmm0
- MOV32mr %rsp, 1, _, 24, _, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, _, 16, _, killed %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, _, 8, _, killed %xmm0 :: (store 8 into %stack.2)
+ MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8)
+ MOV32mr %rsp, 1, %noreg, 16, %noreg, killed %ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
%rax = MOV64ri @wobble
%edi = MOV32ri -121
STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2)
diff --git a/test/CodeGen/X86/peephole-recurrence.mir b/test/CodeGen/X86/peephole-recurrence.mir
index 07ce876d99e..115f38c4ca2 100644
--- a/test/CodeGen/X86/peephole-recurrence.mir
+++ b/test/CodeGen/X86/peephole-recurrence.mir
@@ -215,7 +215,7 @@ body: |
; CHECK: %11:gr32 = ADD32rr
; CHECK-SAME: %1,
; CHECK-SAME: %0,
- MOV32mr %5, 1, _, 0, _, %0 :: (store 4 into %ir.p)
+ MOV32mr %5, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p)
%3 = ADD32rr %2, killed %11, implicit-def dead %eflags
; CHECK: %3:gr32 = ADD32rr
; CHECK-SAME: %2,
diff --git a/test/CodeGen/X86/post-ra-sched-with-debug.mir b/test/CodeGen/X86/post-ra-sched-with-debug.mir
index ee07571d17b..41321598204 100644
--- a/test/CodeGen/X86/post-ra-sched-with-debug.mir
+++ b/test/CodeGen/X86/post-ra-sched-with-debug.mir
@@ -250,9 +250,9 @@ body: |
successors: %bb.3, %bb.2
liveins: %esi, %rdi, %r14, %rbx, %rbp
- ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, _, -20, _
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use _, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use _, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
+ ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, %noreg, -20, %noreg
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
CFI_INSTRUCTION def_cfa_offset 16
@@ -267,32 +267,32 @@ body: |
%r14d = MOV32rr %esi
%rbx = MOV64rr %rdi
CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit-def %rsp
- %rdi = LEA64r %rbx, 1, _, 8, _
- DBG_VALUE debug-use %rdi, debug-use _, !20, !17, debug-location !27
- DBG_VALUE debug-use %rdi, debug-use _, !10, !17, debug-location !18
- %rax = MOV64rm %rbx, 1, _, 16, _ :: (load 8)
- MOV64mr %rbx, 1, _, 8, _, killed %rax :: (store 8)
- MOV64mr %rbx, 1, _, 24, _, %rdi :: (store 8)
+ %rdi = LEA64r %rbx, 1, %noreg, 8, %noreg
+ DBG_VALUE debug-use %rdi, debug-use %noreg, !20, !17, debug-location !27
+ DBG_VALUE debug-use %rdi, debug-use %noreg, !10, !17, debug-location !18
+ %rax = MOV64rm %rbx, 1, %noreg, 16, %noreg :: (load 8)
+ MOV64mr %rbx, 1, %noreg, 8, %noreg, killed %rax :: (store 8)
+ MOV64mr %rbx, 1, %noreg, 24, %noreg, %rdi :: (store 8)
%eax = MOV32ri -1
%cl = MOV8rr %r14b, implicit killed %r14d
%eax = SHL32rCL killed %eax, implicit-def dead %eflags, implicit %cl
- MOV32mr %rbx, 1, _, 32, _, %eax :: (store 4, align 8)
- MOV32mi %rbp, 1, _, -20, _, 0 :: (store 4)
- %rcx = MOV64rm %rbx, 1, _, 8, _ :: (load 8)
- MOV64mr %rip, 1, _, @n, _, %rcx :: (store 8)
+ MOV32mr %rbx, 1, %noreg, 32, %noreg, %eax :: (store 4, align 8)
+ MOV32mi %rbp, 1, %noreg, -20, %noreg, 0 :: (store 4)
+ %rcx = MOV64rm %rbx, 1, %noreg, 8, %noreg :: (load 8)
+ MOV64mr %rip, 1, %noreg, @n, %noreg, %rcx :: (store 8)
%edx = XOR32rr undef %edx, undef %edx, implicit-def dead %eflags, implicit-def %rdx
TEST64rr %rcx, %rcx, implicit-def %eflags
%esi = MOV32ri @o, implicit-def %rsi
%rsi = CMOVNE64rr killed %rsi, %rdx, implicit killed %eflags
%rsi = OR64rr killed %rsi, killed %rcx, implicit-def %eflags
- %rcx = LEA64r %rbp, 1, _, -20, _
- DBG_VALUE debug-use %rcx, debug-use _, !46, !17, debug-location !48
- DBG_VALUE debug-use %rcx, debug-use _, !39, !17, debug-location !44
+ %rcx = LEA64r %rbp, 1, %noreg, -20, %noreg
+ DBG_VALUE debug-use %rcx, debug-use %noreg, !46, !17, debug-location !48
+ DBG_VALUE debug-use %rcx, debug-use %noreg, !39, !17, debug-location !44
DBG_VALUE %rbp, -20, !29, !17, debug-location !36
%rcx = CMOVNE64rr killed %rcx, killed %rdx, implicit killed %eflags
%rcx = OR64rr killed %rcx, killed %rsi, implicit-def dead %eflags
- %rdx = MOVSX64rm32 %rbx, 1, _, 0, _ :: (load 4, align 8)
- TEST32mr killed %rcx, 4, killed %rdx, 0, _, killed %eax, implicit-def %eflags :: (load 4)
+ %rdx = MOVSX64rm32 %rbx, 1, %noreg, 0, %noreg :: (load 4, align 8)
+ TEST32mr killed %rcx, 4, killed %rdx, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4)
JNE_1 %bb.2, implicit %eflags
JMP_1 %bb.3
@@ -300,7 +300,7 @@ body: |
successors: %bb.2
liveins: %rbx, %rbp
- %rdi = MOV64rm %rbx, 1, _, 24, _ :: (load 8)
+ %rdi = MOV64rm %rbx, 1, %noreg, 24, %noreg :: (load 8)
bb.2:
successors: %bb.1, %bb.3
@@ -308,11 +308,11 @@ body: |
CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp, implicit-def %eax
%eax = KILL %eax, implicit-def %rax
- %ecx = LEA64_32r %rax, 1, _, -1, _, implicit-def %rcx
+ %ecx = LEA64_32r %rax, 1, %noreg, -1, %noreg, implicit-def %rcx
%ecx = SHR32ri %ecx, 31, implicit-def dead %eflags, implicit killed %rcx, implicit-def %rcx
- %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, _
+ %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, %noreg
%eax = SAR32r1 killed %eax, implicit-def dead %eflags
- CMP32mr %rbx, 1, _, 0, _, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8)
+ CMP32mr %rbx, 1, %noreg, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8)
JG_1 %bb.1, implicit killed %eflags
bb.3:
diff --git a/test/CodeGen/X86/pr27681.mir b/test/CodeGen/X86/pr27681.mir
index 956df172b25..8e0296c6d7f 100644
--- a/test/CodeGen/X86/pr27681.mir
+++ b/test/CodeGen/X86/pr27681.mir
@@ -47,11 +47,11 @@ body: |
TEST32rr %edx, %edx, implicit-def %eflags
%cl = SETNEr implicit %eflags
; This %bl def is antidependent on the above use of %ebx
- %bl = MOV8rm %esp, 1, _, 3, _ ; :: (load 1 from %stack.0)
+ %bl = MOV8rm %esp, 1, %noreg, 3, %noreg ; :: (load 1 from %stack.0)
%cl = OR8rr killed %cl, %bl, implicit-def dead %eflags
%esi = MOVZX32rr8 killed %cl
%esi = ADD32rr killed %esi, killed %edi, implicit-def dead %eflags
- %ecx = MOV32rm %esp, 1, _, 24, _ ; :: (load 4 from %stack.2)
+ %ecx = MOV32rm %esp, 1, %noreg, 24, %noreg ; :: (load 4 from %stack.2)
%edx = SAR32rCL killed %edx, implicit-def dead %eflags, implicit %cl
TEST32rr killed %edx, %edx, implicit-def %eflags
%cl = SETNEr implicit %eflags
@@ -66,7 +66,7 @@ body: |
bb.2:
liveins: %cl, %eax, %ebp, %esi
- OR32mr %esp, 1, _, 8, _, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
+ OR32mr %esp, 1, %noreg, 8, %noreg, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
%dl = SETNEr implicit %eflags, implicit-def %edx
bb.3:
diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir
index 17d447dd097..945a87d750a 100644
--- a/test/CodeGen/X86/pre-coalesce.mir
+++ b/test/CodeGen/X86/pre-coalesce.mir
@@ -83,10 +83,10 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- %0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b)
- %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ %0 = MOV64rm %rip, 1, %noreg, @b, %noreg :: (dereferenceable load 8 from @b)
+ %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
- %11 = MOV32rm %rip, 1, _, @a, _ :: (dereferenceable load 4 from @a)
+ %11 = MOV32rm %rip, 1, %noreg, @a, %noreg :: (dereferenceable load 4 from @a)
JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
bb.4:
@@ -101,8 +101,8 @@ body: |
%10 = SHL32ri %10, 5, implicit-def dead %eflags
%10 = ADD32rr %10, %11, implicit-def dead %eflags
%10 = ADD32rr %10, %8, implicit-def dead %eflags
- MOV32mr %rip, 1, _, @a, _, %10 :: (store 4 into @a)
- %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ MOV32mr %rip, 1, %noreg, @a, %noreg, %10 :: (store 4 into @a)
+ %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
%11 = COPY %10
JNE_1 %bb.2.while.body, implicit killed %eflags
diff --git a/test/CodeGen/X86/system-intrinsics-xgetbv.ll b/test/CodeGen/X86/system-intrinsics-xgetbv.ll
index a5ba026c8e6..ffabd3a2309 100644
--- a/test/CodeGen/X86/system-intrinsics-xgetbv.ll
+++ b/test/CodeGen/X86/system-intrinsics-xgetbv.ll
@@ -18,4 +18,4 @@ define i64 @test_xgetbv(i32 %in) {
ret i64 %1;
}
-declare i64 @llvm.x86.xgetbv(i32) \ No newline at end of file
+declare i64 @llvm.x86.xgetbv(i32)
diff --git a/test/CodeGen/X86/tail-merge-after-mbp.mir b/test/CodeGen/X86/tail-merge-after-mbp.mir
index 8e0c9135378..042ac72eead 100644
--- a/test/CodeGen/X86/tail-merge-after-mbp.mir
+++ b/test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -5,25 +5,25 @@
# Check that loop bb.9 is not merged with bb.12
# CHECK: bb.2:
# CHECK-NEXT: successors: %bb.3(0x30000000), %bb.4(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JE_1 %bb.3
# CHECK: bb.4:
# CHECK-NEXT: successors: %bb.5(0x30000000), %bb.10(0x50000000)
-# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0
+# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.5:
# CHECK-NEXT: successors: %bb.6(0x30000000), %bb.7(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JE_1 %bb.6
# CHECK: bb.7
# CHECK-NEXT: successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
-# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0
+# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.8:
# CHECK-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JNE_1 %bb.7
@@ -44,7 +44,7 @@ body: |
bb.7:
successors: %bb.8(0x30000000), %bb.9(0x50000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.9, implicit killed %eflags
@@ -57,13 +57,13 @@ body: |
bb.9:
successors: %bb.10(0x30000000), %bb.15(0x50000000)
- CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8)
+ CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8)
JNE_1 %bb.15, implicit %eflags
bb.10:
successors: %bb.11(0x30000000), %bb.12(0x50000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.12, implicit %eflags
@@ -76,13 +76,13 @@ body: |
bb.12:
successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab)
- CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8), (load 8)
+ CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8), (load 8)
JNE_1 %bb.15, implicit %eflags
bb.13:
successors: %bb.14(0x04000000), %bb.12(0x7c000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.12, implicit %eflags